From d9d61e84dd38d75ad59b39ffd489d3d8ed2948d7 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 19 Sep 2015 20:15:55 +0300 Subject: [PATCH 001/131] draft v1 initial --- src/rocker/build2/build.go | 64 +++++++++++++++++++++++++++++++++ src/rocker/build2/build_test.go | 20 +++++++++++ src/rocker/build2/client.go | 32 +++++++++++++++++ 3 files changed, 116 insertions(+) create mode 100644 src/rocker/build2/build.go create mode 100644 src/rocker/build2/build_test.go create mode 100644 src/rocker/build2/client.go diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go new file mode 100644 index 00000000..6ab2f10a --- /dev/null +++ b/src/rocker/build2/build.go @@ -0,0 +1,64 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "fmt" + "io" + "os" + "rocker/template" + + "github.com/fsouza/go-dockerclient" +) + +type BuildConfig struct { + Rockerfile string + OutStream io.Writer + InStream io.ReadCloser + Auth *docker.AuthConfiguration + Vars template.Vars + ContextDir string +} + +type Build struct { + cfg *BuildConfig + client *Client + rockerfileContent string +} + +func New(client *Client, cfg *BuildConfig) (*Build, error) { + b := &Build{ + cfg: cfg, + client: &client, + } + + fd, err := os.Open(b.cfg.Rockerfile) + if err != nil { + return fmt.Errorf("Failed to open file %s, error: %s", b.cfg.Rockerfile, err) + } + defer fd.Close() + + data, err := template.ProcessConfigTemplate(b.cfg.Rockerfile, fd, b.cfg.Vars, map[string]interface{}{}) + if err != nil { + return err + } + b.rockerfileContent = data.String() + + // TODO: print + + return b, nil +} diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go new file mode 100644 index 00000000..c200f2e2 --- /dev/null +++ b/src/rocker/build2/build_test.go @@ -0,0 +1,20 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +type MockClient struct { +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go new file mode 100644 index 00000000..5a1b84d1 --- /dev/null +++ b/src/rocker/build2/client.go @@ -0,0 +1,32 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import "github.com/fsouza/go-dockerclient" + +type Client interface { +} + +type DockerClient struct { + Client *docker.Client +} + +func New(dockerClient *docker.Client) *DockerClient { + return &DockerClient{ + Client: dockerClient, + } +} From 9a501e26540fd3e91635ab655b0eaf233e6fb897 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 19 Sep 2015 22:39:13 +0300 Subject: [PATCH 002/131] build plan --- src/rocker/build2/build.go | 40 ++-- src/rocker/build2/build_test.go | 33 +++- src/rocker/build2/client.go | 2 +- src/rocker/build2/commands.go | 90 +++++++++ src/rocker/build2/plan.go | 82 ++++++++ src/rocker/build2/plan_test.go | 270 ++++++++++++++++++++++++++ src/rocker/build2/rockerfile.go | 121 ++++++++++++ src/rocker/build2/rockerfile_test.go | 59 ++++++ src/rocker/build2/testdata/Rockerfile | 34 ++++ src/rocker/template/template.go | 6 +- 10 files changed, 711 insertions(+), 26 deletions(-) create mode 100644 src/rocker/build2/commands.go create mode 100644 src/rocker/build2/plan.go create mode 100644 src/rocker/build2/plan_test.go create mode 100644 src/rocker/build2/rockerfile.go create mode 100644 src/rocker/build2/rockerfile_test.go create mode 100644 src/rocker/build2/testdata/Rockerfile diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 6ab2f10a..53fdae2d 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -19,14 +19,12 @@ package build2 import ( "fmt" "io" - "os" "rocker/template" "github.com/fsouza/go-dockerclient" ) type BuildConfig struct { - Rockerfile string OutStream io.Writer InStream io.ReadCloser Auth *docker.AuthConfiguration @@ -35,30 +33,28 @@ type BuildConfig struct { } type Build struct { - cfg *BuildConfig - client *Client - rockerfileContent string + rockerfile *Rockerfile + cfg BuildConfig + container *docker.Config + client Client } -func New(client *Client, cfg *BuildConfig) (*Build, error) { - b := &Build{ - cfg: cfg, - client: &client, +func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) (b *Build, err error) { + b = &Build{ + rockerfile: rockerfile, + cfg: cfg, + client: client, } - fd, err := os.Open(b.cfg.Rockerfile) - if err != nil { - return fmt.Errorf("Failed to open file %s, error: %s", b.cfg.Rockerfile, err) - } - defer fd.Close() + return b, nil +} - data, err := template.ProcessConfigTemplate(b.cfg.Rockerfile, fd, b.cfg.Vars, map[string]interface{}{}) - if err != nil { - return err +func (b *Build) Run(plan Plan) error { + for k, c := range plan { + fmt.Printf("Step %d: %q\n", k, c) + if err := c.Execute(b); err != nil { + return err + } } - b.rockerfileContent = data.String() - - // TODO: print - - return b, nil + return nil } diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index c200f2e2..5e31eda3 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -16,5 +16,36 @@ package build2 -type MockClient struct { +import ( + "rocker/template" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewBuild(t *testing.T) { + b := makeBuild(t, "FROM 
ubuntu", BuildConfig{}) + assert.IsType(t, &Rockerfile{}, b.rockerfile) +} + +// internal helpers + +func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) *Build { + pc, _, _, _ := runtime.Caller(1) + fn := runtime.FuncForPC(pc) + + r, err := NewRockerfile(fn.Name(), strings.NewReader(rockerfileContent), template.Vars{}, template.Funs{}) + if err != nil { + t.Fatal(err) + } + + b, err := New(&MockClient{}, r, BuildConfig{}) + if err != nil { + t.Fatal(err) + } + return b } + +type MockClient struct{} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 5a1b84d1..0452687f 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -25,7 +25,7 @@ type DockerClient struct { Client *docker.Client } -func New(dockerClient *docker.Client) *DockerClient { +func NewDockerClient(dockerClient *docker.Client) *DockerClient { return &DockerClient{ Client: dockerClient, } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go new file mode 100644 index 00000000..f19bd20c --- /dev/null +++ b/src/rocker/build2/commands.go @@ -0,0 +1,90 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import "fmt" + +type ConfigCommand struct { + name string + args []string + attrs map[string]bool + flags map[string]string + original string +} + +type Command interface { + Execute(b *Build) error +} + +func NewCommand(cfg ConfigCommand) (Command, error) { + // TODO: use reflection + switch cfg.name { + case "from": + return &CommandFrom{cfg}, nil + case "run": + return &CommandRun{cfg}, nil + case "env": + return &CommandEnv{cfg}, nil + case "tag": + return &CommandTag{cfg}, nil + } + return nil, fmt.Errorf("Unknown command: %s", cfg.name) +} + +type CommandFrom struct { + cfg ConfigCommand +} + +func (c *CommandFrom) Execute(b *Build) error { + return nil +} + +type CommandReset struct{} + +func (c *CommandReset) Execute(b *Build) error { + return nil +} + +type CommandCommit struct{} + +func (c *CommandCommit) Execute(b *Build) error { + return nil +} + +type CommandRun struct { + cfg ConfigCommand +} + +func (c *CommandRun) Execute(b *Build) error { + return nil +} + +type CommandEnv struct { + cfg ConfigCommand +} + +func (c *CommandEnv) Execute(b *Build) error { + return nil +} + +type CommandTag struct { + cfg ConfigCommand +} + +func (c *CommandTag) Execute(b *Build) error { + return nil +} diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go new file mode 100644 index 00000000..b1216073 --- /dev/null +++ b/src/rocker/build2/plan.go @@ -0,0 +1,82 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import "strings" + +type Plan []Command + +func NewPlan(b *Build) (plan Plan, err error) { + plan = Plan{} + + commands := b.rockerfile.Commands() + committed := true + + commit := func() { + plan = append(plan, &CommandCommit{}) + committed = true + } + + alwaysCommitBefore := "run attach add copy tag push" + alwaysCommitAfter := "run attach add copy" + neverCommitAfter := "from tag push" + + for i := 0; i < len(commands); i++ { + cfg := commands[i] + + cmd, err := NewCommand(cfg) + if err != nil { + return nil, err + } + + // We want to reset the collected state between FROM instructions + // But do it only if it's not the first FROM + if cfg.name == "from" { + if !committed { + commit() + } + if i > 0 { + plan = append(plan, &CommandReset{}) + } + } + + // Commit before commands that require our state + if strings.Contains(alwaysCommitBefore, cfg.name) && !committed { + commit() + } + + plan = append(plan, cmd) + + // Some commands need immediate commit + if strings.Contains(alwaysCommitAfter, cfg.name) { + commit() + } else if !strings.Contains(neverCommitAfter, cfg.name) { + // Reset the committed state for the rest of commands and + // start collecting them + committed = false + + // If we reached the end of Rockerfile, do the final commit + // As you noticed, the final commit will not happen in the last + // command was TAG, PUSH or FROM + if i == len(commands)-1 { + commit() + } + } + } + + return plan, err +} diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build2/plan_test.go new file mode 100644 index 00000000..6eaec3c1 --- /dev/null +++ b/src/rocker/build2/plan_test.go @@ -0,0 +1,270 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPlan_Basic(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +`) + + expected := []Command{ + &CommandFrom{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_Run(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +RUN apt-get update +`) + + expected := []Command{ + &CommandFrom{}, + &CommandRun{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvRun(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +ENV version=1.2 +RUN apt-get update +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandRun{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvLast(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFroms(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + &CommandReset{}, + &CommandFrom{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFromsEnvBetween(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV name=web +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandReset{}, + &CommandFrom{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TwoFromsTwoEnvs(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV mode=build +FROM alpine +ENV mode=run +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandReset{}, + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagAtTheEnd(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +`) + + expected := []Command{ + &CommandFrom{}, + &CommandTag{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_EnvBeforeTag(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV type=web +TAG my-build +`) + + expected := []Command{ + &CommandFrom{}, + &CommandEnv{}, + &CommandCommit{}, + &CommandTag{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagInTheMiddle(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +ENV type=web +`) + + expected := []Command{ + &CommandFrom{}, + &CommandTag{}, + &CommandEnv{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_TagBeforeFrom(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG my-build +FROM alpine +`) + + expected := []Command{ + &CommandFrom{}, + &CommandTag{}, + &CommandReset{}, + &CommandFrom{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +func TestPlan_RunBeforeTag(t *testing.T) { + p := makePlan(t, ` 
+FROM ubuntu +RUN apt-get update +TAG my-build +`) + + expected := []Command{ + &CommandFrom{}, + &CommandRun{}, + &CommandCommit{}, + &CommandTag{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + +// internal helpers + +func makePlan(t *testing.T, rockerfileContent string) Plan { + b := makeBuild(t, rockerfileContent, BuildConfig{}) + + p, err := NewPlan(b) + if err != nil { + t.Fatal(err) + } + + return p +} diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go new file mode 100644 index 00000000..80d0d1e4 --- /dev/null +++ b/src/rocker/build2/rockerfile.go @@ -0,0 +1,121 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "rocker/parser" + "rocker/template" + "strings" +) + +type Rockerfile struct { + Name string + Source string + Content string + Vars template.Vars + Funs template.Funs + + rootNode *parser.Node +} + +func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { + fd, err := os.Open(name) + if err != nil { + return nil, err + } + defer fd.Close() + + return NewRockerfile(name, fd, vars, funs) +} + +func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { + r = &Rockerfile{ + Name: name, + Vars: vars, + Funs: funs, + } + + var ( + source []byte + content *bytes.Buffer + ) + + if source, err = ioutil.ReadAll(in); err != nil { + return nil, fmt.Errorf("Failed to read Rockerfile %s, error: %s", name, err) + } + + r.Source = string(source) + + if content, err = template.Process(name, bytes.NewReader(source), vars, funs); err != nil { + return nil, err + } + + r.Content = content.String() + + if r.rootNode, err = parser.Parse(content); err != nil { + return nil, err + } + + return r, nil +} + +func (r *Rockerfile) Commands() []ConfigCommand { + commands := []ConfigCommand{} + + for i := 0; i < len(r.rootNode.Children); i++ { + node := r.rootNode.Children[i] + + cfg := ConfigCommand{ + name: node.Value, + attrs: node.Attributes, + original: node.Original, + args: []string{}, + flags: parseFlags(node.Flags), + } + + // fill in args and substitute vars + for n := node.Next; n != nil; n = n.Next { + cfg.args = append(cfg.args, n.Value) + } + + commands = append(commands, cfg) + } + + return commands +} + +func parseFlags(flags []string) map[string]string { + result := make(map[string]string) + for _, flag := range flags { + key := flag[2:] + value := "" + + index := strings.Index(key, "=") + if index >= 0 { + value = key[index+1:] + key = key[:index] + } + + result[key] = value + } + return result +} diff --git a/src/rocker/build2/rockerfile_test.go b/src/rocker/build2/rockerfile_test.go new file mode 100644 index 00000000..16ec9be6 --- /dev/null +++ b/src/rocker/build2/rockerfile_test.go @@ -0,0 +1,59 @@ +/*- + * Copyright 2015 Grammarly, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "rocker/template" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewRockerfile_Base(t *testing.T) { + src := `FROM {{ .BaseImage }}` + vars := template.Vars{"BaseImage": "ubuntu"} + r, err := NewRockerfile("test", strings.NewReader(src), vars, template.Funs{}) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, src, r.Source) + assert.Equal(t, "FROM ubuntu", r.Content) +} + +func TestNewRockerfileFromFile(t *testing.T) { + r, err := NewRockerfileFromFile("testdata/Rockerfile", template.Vars{}, template.Funs{}) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, `from "some-java8-image-dev:1"`, r.rootNode.Children[0].Dump()) +} + +func TestRockerfileCommands(t *testing.T) { + src := `FROM ubuntu` + r, err := NewRockerfile("test", strings.NewReader(src), template.Vars{}, template.Funs{}) + if err != nil { + t.Fatal(err) + } + + commands := r.Commands() + assert.Len(t, commands, 1) + assert.Equal(t, "from", commands[0].name) + assert.Equal(t, "ubuntu", commands[0].args[0]) +} diff --git a/src/rocker/build2/testdata/Rockerfile b/src/rocker/build2/testdata/Rockerfile new file mode 100644 index 00000000..7cf5c72e --- /dev/null +++ b/src/rocker/build2/testdata/Rockerfile @@ -0,0 +1,34 @@ +FROM some-java8-image-dev:1 + +# Install nodejs, npm and bower +RUN \ + apt-get update && \ + apt-get install -y nodejs npm && \ + npm install -g bower && \ + rm -rf /var/lib/apt/lists/* + +RUN echo "{ \"allow_root\": true }" > /root/.bowerrc + +RUN ln -sf /usr/bin/nodejs /usr/bin/node + +ADD --user=john --ignore-mtime . /src +WORKDIR /src + +ONBUILD ADD [".", "/"] + +MOUNT /root/.gradle +MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa + +RUN gradle --refresh-dependencies --stacktrace clean test + +EXPORT /src/corgi-app/build/distributions/app.tar + +#=== + +FROM some-java8-image:1 + +IMPORT app.tar /opt + +CMD ["/sbin/my_init", "/opt/app/bin/app"] + +PUSH mycompany/app:$branch-$version diff --git a/src/rocker/template/template.go b/src/rocker/template/template.go index a620b185..2a329098 100644 --- a/src/rocker/template/template.go +++ b/src/rocker/template/template.go @@ -32,9 +32,11 @@ import ( "github.com/kr/pretty" ) +type Funs map[string]interface{} + // Process renders config through the template processor. // vars and additional functions are acceptable. 
-func Process(name string, reader io.Reader, vars Vars, funcs map[string]interface{}) (*bytes.Buffer, error) { +func Process(name string, reader io.Reader, vars Vars, funs Funs) (*bytes.Buffer, error) { var buf bytes.Buffer // read template @@ -89,7 +91,7 @@ func Process(name string, reader io.Reader, vars Vars, funcs map[string]interfac "trimSpace": strings.TrimSpace, "trimSuffix": strings.TrimSuffix, } - for k, f := range funcs { + for k, f := range funs { funcMap[k] = f } From d71965f8d4158d739feed3340d0f00e2c7806fbc Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 19 Sep 2015 23:37:01 +0300 Subject: [PATCH 003/131] vendor: fetch stretchr/objx for mocking --- vendor/manifest | 6 + .../src/github.com/stretchr/objx/LICENSE.md | 23 + vendor/src/github.com/stretchr/objx/README.md | 3 + .../src/github.com/stretchr/objx/accessors.go | 179 + .../stretchr/objx/accessors_test.go | 145 + .../stretchr/objx/codegen/array-access.txt | 14 + .../stretchr/objx/codegen/index.html | 86 + .../stretchr/objx/codegen/template.txt | 286 ++ .../stretchr/objx/codegen/types_list.txt | 20 + .../src/github.com/stretchr/objx/constants.go | 13 + .../github.com/stretchr/objx/conversions.go | 117 + .../stretchr/objx/conversions_test.go | 94 + vendor/src/github.com/stretchr/objx/doc.go | 72 + .../github.com/stretchr/objx/fixture_test.go | 98 + vendor/src/github.com/stretchr/objx/map.go | 222 ++ .../github.com/stretchr/objx/map_for_test.go | 10 + .../src/github.com/stretchr/objx/map_test.go | 147 + .../src/github.com/stretchr/objx/mutations.go | 81 + .../stretchr/objx/mutations_test.go | 77 + .../src/github.com/stretchr/objx/security.go | 14 + .../github.com/stretchr/objx/security_test.go | 12 + .../stretchr/objx/simple_example_test.go | 41 + vendor/src/github.com/stretchr/objx/tests.go | 17 + .../github.com/stretchr/objx/tests_test.go | 24 + .../stretchr/objx/type_specific_codegen.go | 2881 +++++++++++++++++ .../objx/type_specific_codegen_test.go | 2867 ++++++++++++++++ vendor/src/github.com/stretchr/objx/value.go | 13 + .../github.com/stretchr/objx/value_test.go | 1 + 28 files changed, 7563 insertions(+) create mode 100644 vendor/src/github.com/stretchr/objx/LICENSE.md create mode 100644 vendor/src/github.com/stretchr/objx/README.md create mode 100644 vendor/src/github.com/stretchr/objx/accessors.go create mode 100644 vendor/src/github.com/stretchr/objx/accessors_test.go create mode 100644 vendor/src/github.com/stretchr/objx/codegen/array-access.txt create mode 100644 vendor/src/github.com/stretchr/objx/codegen/index.html create mode 100644 vendor/src/github.com/stretchr/objx/codegen/template.txt create mode 100644 vendor/src/github.com/stretchr/objx/codegen/types_list.txt create mode 100644 vendor/src/github.com/stretchr/objx/constants.go create mode 100644 vendor/src/github.com/stretchr/objx/conversions.go create mode 100644 vendor/src/github.com/stretchr/objx/conversions_test.go create mode 100644 vendor/src/github.com/stretchr/objx/doc.go create mode 100644 vendor/src/github.com/stretchr/objx/fixture_test.go create mode 100644 vendor/src/github.com/stretchr/objx/map.go create mode 100644 vendor/src/github.com/stretchr/objx/map_for_test.go create mode 100644 vendor/src/github.com/stretchr/objx/map_test.go create mode 100644 vendor/src/github.com/stretchr/objx/mutations.go create mode 100644 vendor/src/github.com/stretchr/objx/mutations_test.go create mode 100644 vendor/src/github.com/stretchr/objx/security.go create mode 100644 vendor/src/github.com/stretchr/objx/security_test.go create mode 100644 
vendor/src/github.com/stretchr/objx/simple_example_test.go create mode 100644 vendor/src/github.com/stretchr/objx/tests.go create mode 100644 vendor/src/github.com/stretchr/objx/tests_test.go create mode 100644 vendor/src/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go create mode 100644 vendor/src/github.com/stretchr/objx/value.go create mode 100644 vendor/src/github.com/stretchr/objx/value_test.go diff --git a/vendor/manifest b/vendor/manifest index 98c2b66a..7fdfb83f 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -95,6 +95,12 @@ "repository": "https://github.com/fsouza/go-dockerclient", "revision": "c9ad0ce23f68428421adfc6ced9e6123f54788a5", "branch": "master" + }, + { + "importpath": "github.com/stretchr/objx", + "repository": "https://github.com/stretchr/objx", + "revision": "cbeaeb16a013161a98496fad62933b1d21786672", + "branch": "master" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/stretchr/objx/LICENSE.md b/vendor/src/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 00000000..21999458 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/src/github.com/stretchr/objx/README.md b/vendor/src/github.com/stretchr/objx/README.md new file mode 100644 index 00000000..4aa18068 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/README.md @@ -0,0 +1,3 @@ +# objx + + * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/vendor/src/github.com/stretchr/objx/accessors.go b/vendor/src/github.com/stretchr/objx/accessors.go new file mode 100644 index 00000000..721bcac7 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. 
+// +// Get can only operate directly on map[string]interface{} and []interface. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. 
Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/vendor/src/github.com/stretchr/objx/accessors_test.go b/vendor/src/github.com/stretchr/objx/accessors_test.go new file mode 100644 index 00000000..ce5d8e4a --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/accessors_test.go @@ -0,0 +1,145 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestAccessorsAccessGetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) + +} +func TestAccessorsAccessGetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessGetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessGetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) + + assert.Panics(t, func() { + access(current, "names[2]", nil, false, true) + }) + assert.Nil(t, access(current, "names[2]", nil, false, false)) + +} + +func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsGet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + assert.Equal(t, "Tyler", current.Get("name").data) + +} + 
+func TestAccessorsAccessSetSingleField(t *testing.T) { + + current := map[string]interface{}{"name": "Tyler"} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + + access(current, "age", 29, true, true) + assert.Equal(t, current["age"], 29) + +} + +func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { + + current := map[string]interface{}{} + access(current, "name", "Mat", true, false) + assert.Equal(t, current["name"], "Mat") + +} + +func TestAccessorsAccessSetDeep(t *testing.T) { + + current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} + + access(current, "name.first", "Mat", true, true) + access(current, "name.last", "Ryer", true, true) + + assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) + +} +func TestAccessorsAccessSetDeepDeep(t *testing.T) { + + current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} + + access(current, "one.two.three.four", 5, true, true) + + assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) + +} +func TestAccessorsAccessSetArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{"Tyler"}} + + access(current, "names[0]", "Mat", true, true) + + assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) + +} +func TestAccessorsAccessSetInsideArray(t *testing.T) { + + current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} + + access(current, "names[0].first", "Mat", true, true) + access(current, "names[0].last", "Ryer", true, true) + access(current, "names[1].first", "Captain", true, true) + access(current, "names[1].last", "Underpants", true, true) + + assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true)) + assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) + assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) + assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) + +} + +func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { + + current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} + one := access(current, 0, nil, false, false) + two := access(current, 1, nil, false, false) + three := access(current, 2, nil, false, false) + + assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) + assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) + assert.Nil(t, three) + +} + +func TestAccessorsSet(t *testing.T) { + + current := New(map[string]interface{}{"name": "Tyler"}) + current.Set("name", "Mat") + assert.Equal(t, "Mat", current.Get("name").data) + +} diff --git a/vendor/src/github.com/stretchr/objx/codegen/array-access.txt b/vendor/src/github.com/stretchr/objx/codegen/array-access.txt new file mode 100644 index 00000000..30602347 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + 
return a[index] + } + } diff --git a/vendor/src/github.com/stretchr/objx/codegen/index.html b/vendor/src/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 00000000..379ffc3c --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + + + + Codegen + + + + + +

+ Template
+ Use {x} as a placeholder for each argument.
+ Arguments (comma separated)
+ One block per line
+ Output

+ + + + + + + + diff --git a/vendor/src/github.com/stretchr/objx/codegen/template.txt b/vendor/src/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 00000000..b396900b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. +func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := v.Collect{4}(func(index int, val {1}) interface{} 
{ + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/vendor/src/github.com/stretchr/objx/codegen/types_list.txt b/vendor/src/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 00000000..069d43d8 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/vendor/src/github.com/stretchr/objx/constants.go b/vendor/src/github.com/stretchr/objx/constants.go new file mode 100644 index 00000000..f9eb42a2 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. + SignatureSeparator = "_" +) diff --git a/vendor/src/github.com/stretchr/objx/conversions.go b/vendor/src/github.com/stretchr/objx/conversions.go new file mode 100644 index 00000000..9cdfa9f9 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/src/github.com/stretchr/objx/conversions_test.go b/vendor/src/github.com/stretchr/objx/conversions_test.go new file mode 100644 index 00000000..e9ccd298 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/conversions_test.go @@ -0,0 +1,94 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConversionJSON(t *testing.T) { + + jsonString := `{"name":"Mat"}` + o := MustFromJSON(jsonString) + + result, err := o.JSON() + + if assert.NoError(t, err) { + assert.Equal(t, jsonString, result) + } + + assert.Equal(t, jsonString, o.MustJSON()) + +} + +func TestConversionJSONWithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustJSON() + }) + + _, err := o.JSON() + + assert.Error(t, err) + +} + +func TestConversionBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.Base64() + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) + +} + +func TestConversionBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustBase64() + }) + + _, err := o.Base64() + + assert.Error(t, err) + +} + +func TestConversionSignedBase64(t *testing.T) { + + o := New(map[string]interface{}{"name": "Mat"}) + + result, err := o.SignedBase64("key") + + if assert.NoError(t, err) { + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) + } + + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) + +} + +func TestConversionSignedBase64WithError(t *testing.T) { + + o := MSI() + o["test"] = func() {} + + assert.Panics(t, func() { + o.MustSignedBase64("key") + }) + + _, err := o.SignedBase64("key") + + assert.Error(t, err) + +} diff --git a/vendor/src/github.com/stretchr/objx/doc.go b/vendor/src/github.com/stretchr/objx/doc.go new file mode 100644 index 00000000..47bf85e4 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. 
+// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. +// +// Pattern +// +// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have saught the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... */ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ... 
*/ +// +// } +package objx diff --git a/vendor/src/github.com/stretchr/objx/fixture_test.go b/vendor/src/github.com/stretchr/objx/fixture_test.go new file mode 100644 index 00000000..27f7d904 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/fixture_test.go @@ -0,0 +1,98 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +var fixtures = []struct { + // name is the name of the fixture (used for reporting + // failures) + name string + // data is the JSON data to be worked on + data string + // get is the argument(s) to pass to Get + get interface{} + // output is the expected output + output interface{} +}{ + { + name: "Simple get", + data: `{"name": "Mat"}`, + get: "name", + output: "Mat", + }, + { + name: "Get with dot notation", + data: `{"address": {"city": "Boulder"}}`, + get: "address.city", + output: "Boulder", + }, + { + name: "Deep get with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.two.three.four", + output: "hello", + }, + { + name: "Get missing with dot notation", + data: `{"one": {"two": {"three": {"four": "hello"}}}}`, + get: "one.ten", + output: nil, + }, + { + name: "Get with array notation", + data: `{"tags": ["one", "two", "three"]}`, + get: "tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation", + data: `{"types": { "tags": ["one", "two", "three"]}}`, + get: "types.tags[1]", + output: "two", + }, + { + name: "Get with array and dot notation - field after array", + data: `{"tags": [{"name":"one"}, {"name":"two"}, {"name":"three"}]}`, + get: "tags[1].name", + output: "two", + }, + { + name: "Complex get with array and dot notation", + data: `{"tags": [{"list": [{"one":"pizza"}]}]}`, + get: "tags[0].list[0].one", + output: "pizza", + }, + { + name: "Get field from within string should be nil", + data: `{"name":"Tyler"}`, + get: "name.something", + output: nil, + }, + { + name: "Get field from within string (using array accessor) should be nil", + data: `{"numbers":["one", "two", "three"]}`, + get: "numbers[0].nope", + output: nil, + }, +} + +func TestFixtures(t *testing.T) { + + for _, fixture := range fixtures { + + m := MustFromJSON(fixture.data) + + // get the value + t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) + value := m.Get(fixture.get.(string)) + + // make sure it matches + assert.Equal(t, fixture.output, value.data, + "Get fixture \"%s\" failed: %v", fixture.name, fixture, + ) + + } + +} diff --git a/vendor/src/github.com/stretchr/objx/map.go b/vendor/src/github.com/stretchr/objx/map.go new file mode 100644 index 00000000..eb6ed8e2 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. 
+// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key arugment is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/vendor/src/github.com/stretchr/objx/map_for_test.go b/vendor/src/github.com/stretchr/objx/map_for_test.go new file mode 100644 index 00000000..6beb5067 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map_for_test.go @@ -0,0 +1,10 @@ +package objx + +var TestMap map[string]interface{} = map[string]interface{}{ + "name": "Tyler", + "address": map[string]interface{}{ + "city": "Salt Lake City", + "state": "UT", + }, + "numbers": []interface{}{"one", "two", "three", "four", "five"}, +} diff --git a/vendor/src/github.com/stretchr/objx/map_test.go b/vendor/src/github.com/stretchr/objx/map_test.go new file mode 100644 index 00000000..1f8b45c6 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/map_test.go @@ -0,0 +1,147 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +type Convertable struct { + name string +} + +func (c *Convertable) MSI() map[string]interface{} { + return map[string]interface{}{"name": c.name} +} + +type Unconvertable struct { + name string +} + +func TestMapCreation(t *testing.T) { + + o := New(nil) + assert.Nil(t, o) + + o = New("Tyler") + assert.Nil(t, o) + + unconvertable := &Unconvertable{name: "Tyler"} + o = New(unconvertable) + assert.Nil(t, o) + + convertable := &Convertable{name: "Tyler"} + o = New(convertable) + if assert.NotNil(t, convertable) { + assert.Equal(t, "Tyler", o["name"], "Tyler") + } + + o = MSI() + if assert.NotNil(t, o) { + assert.NotNil(t, o) + } + + o = MSI("name", "Tyler") + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, o["name"], "Tyler") + } + } + +} + +func TestMapMustFromJSONWithError(t *testing.T) { + + _, err := FromJSON(`"name":"Mat"}`) + assert.Error(t, err) + +} + +func TestMapFromJSON(t *testing.T) { + + o := MustFromJSON(`{"name":"Mat"}`) + + if assert.NotNil(t, o) { + if assert.NotNil(t, o) { + assert.Equal(t, "Mat", 
o["name"]) + } + } + +} + +func TestMapFromJSONWithError(t *testing.T) { + + var m Map + + assert.Panics(t, func() { + m = MustFromJSON(`"name":"Mat"}`) + }) + + assert.Nil(t, m) + +} + +func TestMapFromBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=" + + o, err := FromBase64(base64String) + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromBase64(base64String).Get("name").Str(), "Mat") + +} + +func TestMapFromBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWFasd0In0=" + + _, err := FromBase64(base64String) + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromBase64(base64String) + }) + +} + +func TestMapFromSignedBase64String(t *testing.T) { + + base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + o, err := FromSignedBase64(base64String, "key") + + if assert.NoError(t, err) { + assert.Equal(t, o.Get("name").Str(), "Mat") + } + + assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") + +} + +func TestMapFromSignedBase64StringWithError(t *testing.T) { + + base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + + _, err := FromSignedBase64(base64String, "key") + + assert.Error(t, err) + + assert.Panics(t, func() { + MustFromSignedBase64(base64String, "key") + }) + +} + +func TestMapFromURLQuery(t *testing.T) { + + m, err := FromURLQuery("name=tyler&state=UT") + if assert.NoError(t, err) && assert.NotNil(t, m) { + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) + } + +} diff --git a/vendor/src/github.com/stretchr/objx/mutations.go b/vendor/src/github.com/stretchr/objx/mutations.go new file mode 100644 index 00000000..b35c8639 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/vendor/src/github.com/stretchr/objx/mutations_test.go b/vendor/src/github.com/stretchr/objx/mutations_test.go new file mode 100644 index 00000000..e20ee23b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/mutations_test.go @@ -0,0 +1,77 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestExclude(t *testing.T) { + + d := make(Map) + d["name"] = "Mat" + d["age"] = 29 + d["secret"] = "ABC" + + excluded := d.Exclude([]string{"secret"}) + + assert.Equal(t, d["name"], excluded["name"]) + assert.Equal(t, d["age"], excluded["age"]) + assert.False(t, excluded.Has("secret"), "secret should be excluded") + +} + +func TestCopy(t *testing.T) { + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + d1Obj := New(d1) + d2Obj := d1Obj.Copy() + + d2Obj["name"] = "Mat" + + assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") + assert.Equal(t, d2Obj.Get("name").Str(), "Mat") + +} + +func TestMerge(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.Merge(d1Obj) + + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Empty(t, dObj.Get("location").Str()) + +} + +func TestMergeHere(t *testing.T) { + + d := make(map[string]interface{}) + d["name"] = "Mat" + + d1 := make(map[string]interface{}) + d1["name"] = "Tyler" + d1["location"] = "UT" + + dObj := New(d) + d1Obj := New(d1) + + merged := dObj.MergeHere(d1Obj) + + assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") + assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) + assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) +} diff --git a/vendor/src/github.com/stretchr/objx/security.go b/vendor/src/github.com/stretchr/objx/security.go new file mode 100644 index 00000000..fdd6be9c --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. 
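+//
+// For illustration, using the hypothetical inputs from the test below, the result
+// is the hex-encoded SHA-1 of data + ":" + key:
+//
+// HashWithKey("abc", "def") // "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9"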
+func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/src/github.com/stretchr/objx/security_test.go b/vendor/src/github.com/stretchr/objx/security_test.go new file mode 100644 index 00000000..8f0898f6 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/security_test.go @@ -0,0 +1,12 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHashWithKey(t *testing.T) { + + assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) + +} diff --git a/vendor/src/github.com/stretchr/objx/simple_example_test.go b/vendor/src/github.com/stretchr/objx/simple_example_test.go new file mode 100644 index 00000000..5408c7fd --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/simple_example_test.go @@ -0,0 +1,41 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSimpleExample(t *testing.T) { + + // build a map from a JSON object + o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) + + // Map can be used as a straight map[string]interface{} + assert.Equal(t, o["name"], "Mat") + + // Get an Value object + v := o.Get("name") + assert.Equal(t, v, &Value{data: "Mat"}) + + // Test the contained value + assert.False(t, v.IsInt()) + assert.False(t, v.IsBool()) + assert.True(t, v.IsStr()) + + // Get the contained value + assert.Equal(t, v.Str(), "Mat") + + // Get a default value if the contained value is not of the expected type or does not exist + assert.Equal(t, 1, v.Int(1)) + + // Get a value by using array notation + assert.Equal(t, "indian", o.Get("foods[0]").Data()) + + // Set a value by using array notation + o.Set("foods[0]", "italian") + assert.Equal(t, "italian", o.Get("foods[0]").Str()) + + // Get a value by using dot notation + assert.Equal(t, "hobbiton", o.Get("location.county").Str()) + +} diff --git a/vendor/src/github.com/stretchr/objx/tests.go b/vendor/src/github.com/stretchr/objx/tests.go new file mode 100644 index 00000000..d9e0b479 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
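+//
+// For illustration (hypothetical data), a missing selector yields a nil Value:
+//
+// objx.MSI("name", "Mat").Get("age").IsNil()  // true
+// objx.MSI("name", "Mat").Get("name").IsNil() // false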
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/src/github.com/stretchr/objx/tests_test.go b/vendor/src/github.com/stretchr/objx/tests_test.go new file mode 100644 index 00000000..bcc1eb03 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/tests_test.go @@ -0,0 +1,24 @@ +package objx + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHas(t *testing.T) { + + m := New(TestMap) + + assert.True(t, m.Has("name")) + assert.True(t, m.Has("address.state")) + assert.True(t, m.Has("numbers[4]")) + + assert.False(t, m.Has("address.state.nope")) + assert.False(t, m.Has("address.nope")) + assert.False(t, m.Has("nope")) + assert.False(t, m.Has("numbers[5]")) + + m = nil + assert.False(t, m.Has("nothing")) + +} diff --git a/vendor/src/github.com/stretchr/objx/type_specific_codegen.go b/vendor/src/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 00000000..f3ecb29b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. 
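+//
+// For illustration (hypothetical data), iterate a []map[string]interface{} value:
+//
+// rows := []map[string]interface{}{{"id": 1}, {"id": 2}}
+// objx.MSI("rows", rows).Get("rows").EachMSI(func(i int, row map[string]interface{}) bool {
+//     fmt.Println(i, row["id"])
+//     return true // return false to stop iterating early
+// })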
+func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). 
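+//
+// For illustration (hypothetical data):
+//
+// rows := []objx.Map{objx.MSI("id", 1), objx.MSI("id", 2)}
+// objx.MSI("rows", rows).Get("rows").MustObjxMapSlice()   // the []Map as stored
+// objx.MSI("rows", "oops").Get("rows").MustObjxMapSlice() // panics: not a [](Map)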
+func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. 
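+//
+// For illustration (hypothetical data):
+//
+// objx.MSI("flags", []bool{true, false}).Get("flags").BoolSlice() // []bool{true, false}
+// objx.MSI().Get("flags").BoolSlice([]bool{true})                 // missing, so the default is returned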
+func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. 
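+//
+// For illustration (hypothetical data):
+//
+// objx.MSI("name", "Mat").Get("name").MustStr() // "Mat"
+// objx.MSI("age", 29).Get("age").MustStr()      // panics: 29 is an int, not a string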
+func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
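+//
+// For illustration (hypothetical data), mirroring the package-level example
+// m.Get("code").Int(-1):
+//
+// objx.MSI("code", 200).Get("code").Int() // 200
+// objx.MSI().Get("code").Int(-1)          // -1 (missing, so the default is used)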
+func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. 
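+//
+// For illustration (hypothetical data), collect a derived value per element:
+//
+// v := objx.MSI("xs", []int8{1, 2, 3}).Get("xs")
+// labels := v.CollectInt8(func(i int, x int8) interface{} {
+//     return fmt.Sprintf("x%d=%d", i, x)
+// })
+// // labels.MustInterSlice() is []interface{}{"x0=1", "x1=2", "x2=3"}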
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. 
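+//
+// For illustration (hypothetical data), double each element:
+//
+// v := objx.MSI("xs", []int16{1, 2, 3}).Get("xs")
+// doubled := v.ReplaceInt16(func(i int, x int16) int16 { return x * 2 })
+// // doubled.MustInt16Slice() is []int16{2, 4, 6}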
+func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. 
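+//
+// For illustration (hypothetical data), group elements by parity:
+//
+// v := objx.MSI("xs", []int32{1, 2, 3, 4}).Get("xs")
+// grouped := v.GroupInt32(func(i int, x int32) string {
+//     if x%2 == 0 {
+//         return "even"
+//     }
+//     return "odd"
+// })
+// // grouped.Data() is map[string][]int32{"odd": {1, 3}, "even": {2, 4}}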
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. 
+func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. 
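+//
+// A minimal sketch of how this differs from the non-panicking accessor
+// (the map and its keys are invented for illustration):
+//
+//	m := New(map[string]interface{}{"sizes": []uint32{10, 20}, "nothing": nil})
+//	m.Get("sizes").MustUint32Slice()   // []uint32{10, 20}
+//	m.Get("nothing").Uint32Slice()     // nil, no panic
+//	m.Get("nothing").MustUint32Slice() // panics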
+func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. 
+func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
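+//
+// For example (the map and its keys are invented for illustration):
+//
+//	m := New(map[string]interface{}{"ratio": float32(0.5), "nothing": nil})
+//	m.Get("ratio").Float32()    // 0.5
+//	m.Get("nothing").Float32()  // 0 (system default)
+//	m.Get("nothing").Float32(1) // 1 (supplied default)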
+func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. 
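+//
+// An illustrative sketch (the values are invented), doubling every element:
+//
+//	v := &Value{data: []float64{1, 2, 3}}
+//	doubled := v.ReplaceFloat64(func(i int, f float64) float64 {
+//		return f * 2
+//	})
+//	doubled.MustFloat64Slice() // []float64{2, 4, 6}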
+func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
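+//
+// A small sketch (the values are invented), grouping by even or odd index:
+//
+//	v := &Value{data: []complex64{1, 2, 3, 4}}
+//	groups := v.GroupComplex64(func(i int, c complex64) string {
+//		if i%2 == 0 {
+//			return "even"
+//		}
+//		return "odd"
+//	})
+//	// groups wraps map[string][]complex64{"even": {1, 3}, "odd": {2, 4}}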
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go new file mode 100644 index 00000000..f7a4fcee --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/type_specific_codegen_test.go @@ -0,0 +1,2867 @@ +package objx + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInter(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Inter()) + assert.Equal(t, val, New(m).Get("value").MustInter()) + assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) + assert.Equal(t, val, New(m).Get("nothing").Inter("something")) + + assert.Panics(t, func() { + New(m).Get("age").MustInter() + }) + +} + +func TestInterSlice(t *testing.T) { + + val := interface{}("something") + m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) + assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) + assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInterSlice() + }) + +} + +func TestIsInter(t *testing.T) { + + var v *Value + + v = &Value{data: interface{}("something")} + 
assert.True(t, v.IsInter()) + + v = &Value{data: []interface{}{interface{}("something")}} + assert.True(t, v.IsInterSlice()) + +} + +func TestEachInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + count := 0 + replacedVals := make([]interface{}, 0) + assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustInterSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) + +} + +func TestWhereInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + selected := v.WhereInter(func(i int, val interface{}) bool { + return i%2 == 0 + }).MustInterSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + grouped := v.GroupInter(func(i int, val interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + rawArr := v.MustInterSlice() + + replaced := v.ReplaceInter(func(index int, val interface{}) interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInterSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInter(t *testing.T) { + + v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + + collected := v.CollectInter(func(index int, val interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestMSI(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSI()) + assert.Equal(t, val, New(m).Get("value").MustMSI()) + 
assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) + assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) + + assert.Panics(t, func() { + New(m).Get("age").MustMSI() + }) + +} + +func TestMSISlice(t *testing.T) { + + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) + assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) + assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustMSISlice() + }) + +} + +func TestIsMSI(t *testing.T) { + + var v *Value + + v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} + assert.True(t, v.IsMSI()) + + v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + assert.True(t, v.IsMSISlice()) + +} + +func TestEachMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + count := 0 + replacedVals := make([]map[string]interface{}, 0) + assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) + assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) + assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) + +} + +func TestWhereMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + }).MustMSISlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceMSI(t *testing.T) { + + v := &Value{data: 
[]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + rawArr := v.MustMSISlice() + + replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustMSISlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectMSI(t *testing.T) { + + v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestObjxMap(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMap()) + assert.Equal(t, val, New(m).Get("value").MustObjxMap()) + assert.Equal(t, (Map)(New(nil)), New(m).Get("nothing").ObjxMap()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) + + assert.Panics(t, func() { + New(m).Get("age").MustObjxMap() + }) + +} + +func TestObjxMapSlice(t *testing.T) { + + val := (Map)(New(1)) + m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) + assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) + assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustObjxMapSlice() + }) + +} + +func TestIsObjxMap(t *testing.T) { + + var v *Value + + v = &Value{data: (Map)(New(1))} + assert.True(t, v.IsObjxMap()) + + v = &Value{data: [](Map){(Map)(New(1))}} + assert.True(t, v.IsObjxMapSlice()) + +} + +func TestEachObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + count := 0 + replacedVals := make([](Map), 0) + assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return 
false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) + +} + +func TestWhereObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + selected := v.WhereObjxMap(func(i int, val Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + grouped := v.GroupObjxMap(func(i int, val Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][](Map)) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + rawArr := v.MustObjxMapSlice() + + replaced := v.ReplaceObjxMap(func(index int, val Map) Map { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustObjxMapSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectObjxMap(t *testing.T) { + + v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} + + collected := v.CollectObjxMap(func(index int, val Map) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestBool(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Bool()) + assert.Equal(t, val, New(m).Get("value").MustBool()) + assert.Equal(t, bool(false), New(m).Get("nothing").Bool()) + assert.Equal(t, val, New(m).Get("nothing").Bool(true)) + + assert.Panics(t, func() { + New(m).Get("age").MustBool() + }) + +} + +func TestBoolSlice(t *testing.T) { + + val := bool(true) + m := map[string]interface{}{"value": []bool{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) + assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) + assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustBoolSlice() + }) + +} + +func TestIsBool(t *testing.T) { + + var v *Value + + v = &Value{data: bool(true)} + assert.True(t, v.IsBool()) + + v = &Value{data: []bool{bool(true)}} + assert.True(t, v.IsBoolSlice()) + +} + +func TestEachBool(t *testing.T) { + + v := &Value{data: 
[]bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} + count := 0 + replacedVals := make([]bool, 0) + assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) + +} + +func TestWhereBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + selected := v.WhereBool(func(i int, val bool) bool { + return i%2 == 0 + }).MustBoolSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + grouped := v.GroupBool(func(i int, val bool) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]bool) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + rawArr := v.MustBoolSlice() + + replaced := v.ReplaceBool(func(index int, val bool) bool { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustBoolSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectBool(t *testing.T) { + + v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + + collected := v.CollectBool(func(index int, val bool) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestStr(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Str()) + assert.Equal(t, val, New(m).Get("value").MustStr()) + assert.Equal(t, string(""), New(m).Get("nothing").Str()) + assert.Equal(t, val, New(m).Get("nothing").Str("hello")) + + assert.Panics(t, func() { + New(m).Get("age").MustStr() + }) + +} + +func TestStrSlice(t *testing.T) { + + val := string("hello") + m := map[string]interface{}{"value": []string{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) + assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) + assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustStrSlice() + }) + +} + +func TestIsStr(t *testing.T) { + + var v *Value + + v = &Value{data: 
string("hello")} + assert.True(t, v.IsStr()) + + v = &Value{data: []string{string("hello")}} + assert.True(t, v.IsStrSlice()) + +} + +func TestEachStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + count := 0 + replacedVals := make([]string, 0) + assert.Equal(t, v, v.EachStr(func(i int, val string) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustStrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) + +} + +func TestWhereStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + selected := v.WhereStr(func(i int, val string) bool { + return i%2 == 0 + }).MustStrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + grouped := v.GroupStr(func(i int, val string) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]string) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + rawArr := v.MustStrSlice() + + replaced := v.ReplaceStr(func(index int, val string) string { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustStrSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectStr(t *testing.T) { + + v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + + collected := v.CollectStr(func(index int, val string) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int()) + assert.Equal(t, val, New(m).Get("value").MustInt()) + assert.Equal(t, int(0), New(m).Get("nothing").Int()) + assert.Equal(t, val, New(m).Get("nothing").Int(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt() + }) + +} + +func TestIntSlice(t *testing.T) { + + val := int(1) + m := map[string]interface{}{"value": []int{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) 
+ assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) + assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustIntSlice() + }) + +} + +func TestIsInt(t *testing.T) { + + var v *Value + + v = &Value{data: int(1)} + assert.True(t, v.IsInt()) + + v = &Value{data: []int{int(1)}} + assert.True(t, v.IsIntSlice()) + +} + +func TestEachInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} + count := 0 + replacedVals := make([]int, 0) + assert.Equal(t, v, v.EachInt(func(i int, val int) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) + +} + +func TestWhereInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + selected := v.WhereInt(func(i int, val int) bool { + return i%2 == 0 + }).MustIntSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + grouped := v.GroupInt(func(i int, val int) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + rawArr := v.MustIntSlice() + + replaced := v.ReplaceInt(func(index int, val int) int { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustIntSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt(t *testing.T) { + + v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + + collected := v.CollectInt(func(index int, val int) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt8(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8()) + assert.Equal(t, val, New(m).Get("value").MustInt8()) + assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) + assert.Equal(t, val, New(m).Get("nothing").Int8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt8() + }) + +} + +func TestInt8Slice(t *testing.T) { + + val := int8(1) + m := map[string]interface{}{"value": []int8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) + assert.Equal(t, 
[]int8(nil), New(m).Get("nothing").Int8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt8Slice() + }) + +} + +func TestIsInt8(t *testing.T) { + + var v *Value + + v = &Value{data: int8(1)} + assert.True(t, v.IsInt8()) + + v = &Value{data: []int8{int8(1)}} + assert.True(t, v.IsInt8Slice()) + +} + +func TestEachInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} + count := 0 + replacedVals := make([]int8, 0) + assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) + +} + +func TestWhereInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + selected := v.WhereInt8(func(i int, val int8) bool { + return i%2 == 0 + }).MustInt8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + grouped := v.GroupInt8(func(i int, val int8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + rawArr := v.MustInt8Slice() + + replaced := v.ReplaceInt8(func(index int, val int8) int8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt8(t *testing.T) { + + v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + + collected := v.CollectInt8(func(index int, val int8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt16(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16()) + assert.Equal(t, val, New(m).Get("value").MustInt16()) + assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) + assert.Equal(t, val, New(m).Get("nothing").Int16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt16() + }) + +} + +func TestInt16Slice(t *testing.T) { + + val := int16(1) + m := map[string]interface{}{"value": []int16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) + assert.Equal(t, 
val, New(m).Get("value").MustInt16Slice()[0]) + assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt16Slice() + }) + +} + +func TestIsInt16(t *testing.T) { + + var v *Value + + v = &Value{data: int16(1)} + assert.True(t, v.IsInt16()) + + v = &Value{data: []int16{int16(1)}} + assert.True(t, v.IsInt16Slice()) + +} + +func TestEachInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} + count := 0 + replacedVals := make([]int16, 0) + assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) + +} + +func TestWhereInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + selected := v.WhereInt16(func(i int, val int16) bool { + return i%2 == 0 + }).MustInt16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + grouped := v.GroupInt16(func(i int, val int16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + rawArr := v.MustInt16Slice() + + replaced := v.ReplaceInt16(func(index int, val int16) int16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt16(t *testing.T) { + + v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + + collected := v.CollectInt16(func(index int, val int16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt32(t *testing.T) { + + val := int32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32()) + assert.Equal(t, val, New(m).Get("value").MustInt32()) + assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) + assert.Equal(t, val, New(m).Get("nothing").Int32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustInt32() + }) + +} + +func TestInt32Slice(t *testing.T) { + + val := int32(1) + m := 
map[string]interface{}{"value": []int32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) + assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt32Slice() + }) + +} + +func TestIsInt32(t *testing.T) { + + var v *Value + + v = &Value{data: int32(1)} + assert.True(t, v.IsInt32()) + + v = &Value{data: []int32{int32(1)}} + assert.True(t, v.IsInt32Slice()) + +} + +func TestEachInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} + count := 0 + replacedVals := make([]int32, 0) + assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) + +} + +func TestWhereInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + selected := v.WhereInt32(func(i int, val int32) bool { + return i%2 == 0 + }).MustInt32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + grouped := v.GroupInt32(func(i int, val int32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + rawArr := v.MustInt32Slice() + + replaced := v.ReplaceInt32(func(index int, val int32) int32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt32(t *testing.T) { + + v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + + collected := v.CollectInt32(func(index int, val int32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestInt64(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64()) + assert.Equal(t, val, New(m).Get("value").MustInt64()) + assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) + assert.Equal(t, val, New(m).Get("nothing").Int64(1)) + + 
assert.Panics(t, func() { + New(m).Get("age").MustInt64() + }) + +} + +func TestInt64Slice(t *testing.T) { + + val := int64(1) + m := map[string]interface{}{"value": []int64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) + assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustInt64Slice() + }) + +} + +func TestIsInt64(t *testing.T) { + + var v *Value + + v = &Value{data: int64(1)} + assert.True(t, v.IsInt64()) + + v = &Value{data: []int64{int64(1)}} + assert.True(t, v.IsInt64Slice()) + +} + +func TestEachInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} + count := 0 + replacedVals := make([]int64, 0) + assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) + +} + +func TestWhereInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + selected := v.WhereInt64(func(i int, val int64) bool { + return i%2 == 0 + }).MustInt64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + grouped := v.GroupInt64(func(i int, val int64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]int64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + rawArr := v.MustInt64Slice() + + replaced := v.ReplaceInt64(func(index int, val int64) int64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustInt64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectInt64(t *testing.T) { + + v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + + collected := v.CollectInt64(func(index int, val int64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint()) + assert.Equal(t, val, 
New(m).Get("value").MustUint()) + assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) + assert.Equal(t, val, New(m).Get("nothing").Uint(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint() + }) + +} + +func TestUintSlice(t *testing.T) { + + val := uint(1) + m := map[string]interface{}{"value": []uint{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) + assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintSlice() + }) + +} + +func TestIsUint(t *testing.T) { + + var v *Value + + v = &Value{data: uint(1)} + assert.True(t, v.IsUint()) + + v = &Value{data: []uint{uint(1)}} + assert.True(t, v.IsUintSlice()) + +} + +func TestEachUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} + count := 0 + replacedVals := make([]uint, 0) + assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) + +} + +func TestWhereUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + selected := v.WhereUint(func(i int, val uint) bool { + return i%2 == 0 + }).MustUintSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + grouped := v.GroupUint(func(i int, val uint) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + rawArr := v.MustUintSlice() + + replaced := v.ReplaceUint(func(index int, val uint) uint { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintSlice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint(t *testing.T) { + + v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + + collected := v.CollectUint(func(index int, val uint) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint8(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, 
val, New(m).Get("value").Uint8()) + assert.Equal(t, val, New(m).Get("value").MustUint8()) + assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) + assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint8() + }) + +} + +func TestUint8Slice(t *testing.T) { + + val := uint8(1) + m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) + assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint8Slice() + }) + +} + +func TestIsUint8(t *testing.T) { + + var v *Value + + v = &Value{data: uint8(1)} + assert.True(t, v.IsUint8()) + + v = &Value{data: []uint8{uint8(1)}} + assert.True(t, v.IsUint8Slice()) + +} + +func TestEachUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + count := 0 + replacedVals := make([]uint8, 0) + assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) + +} + +func TestWhereUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + selected := v.WhereUint8(func(i int, val uint8) bool { + return i%2 == 0 + }).MustUint8Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + grouped := v.GroupUint8(func(i int, val uint8) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint8) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + rawArr := v.MustUint8Slice() + + replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint8Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint8(t *testing.T) { + + v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + + collected := v.CollectUint8(func(index int, val uint8) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// 
************************************************************ + +func TestUint16(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16()) + assert.Equal(t, val, New(m).Get("value").MustUint16()) + assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) + assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint16() + }) + +} + +func TestUint16Slice(t *testing.T) { + + val := uint16(1) + m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) + assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint16Slice() + }) + +} + +func TestIsUint16(t *testing.T) { + + var v *Value + + v = &Value{data: uint16(1)} + assert.True(t, v.IsUint16()) + + v = &Value{data: []uint16{uint16(1)}} + assert.True(t, v.IsUint16Slice()) + +} + +func TestEachUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + count := 0 + replacedVals := make([]uint16, 0) + assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint16Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) + +} + +func TestWhereUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + selected := v.WhereUint16(func(i int, val uint16) bool { + return i%2 == 0 + }).MustUint16Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + grouped := v.GroupUint16(func(i int, val uint16) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint16) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + rawArr := v.MustUint16Slice() + + replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint16Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint16(t *testing.T) { + + v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + + collected := v.CollectUint16(func(index int, val uint16) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, 
collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint32(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32()) + assert.Equal(t, val, New(m).Get("value").MustUint32()) + assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) + assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint32() + }) + +} + +func TestUint32Slice(t *testing.T) { + + val := uint32(1) + m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) + assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint32Slice() + }) + +} + +func TestIsUint32(t *testing.T) { + + var v *Value + + v = &Value{data: uint32(1)} + assert.True(t, v.IsUint32()) + + v = &Value{data: []uint32{uint32(1)}} + assert.True(t, v.IsUint32Slice()) + +} + +func TestEachUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + count := 0 + replacedVals := make([]uint32, 0) + assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) + +} + +func TestWhereUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + selected := v.WhereUint32(func(i int, val uint32) bool { + return i%2 == 0 + }).MustUint32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + grouped := v.GroupUint32(func(i int, val uint32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + rawArr := v.MustUint32Slice() + + replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUint32(t *testing.T) { + + v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + + collected := v.CollectUint32(func(index int, val uint32) 
interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUint64(t *testing.T) { + + val := uint64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64()) + assert.Equal(t, val, New(m).Get("value").MustUint64()) + assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) + assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUint64() + }) + +} + +func TestUint64Slice(t *testing.T) { + + val := uint64(1) + m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) + assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUint64Slice() + }) + +} + +func TestIsUint64(t *testing.T) { + + var v *Value + + v = &Value{data: uint64(1)} + assert.True(t, v.IsUint64()) + + v = &Value{data: []uint64{uint64(1)}} + assert.True(t, v.IsUint64Slice()) + +} + +func TestEachUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + count := 0 + replacedVals := make([]uint64, 0) + assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) + +} + +func TestWhereUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + selected := v.WhereUint64(func(i int, val uint64) bool { + return i%2 == 0 + }).MustUint64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + grouped := v.GroupUint64(func(i int, val uint64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uint64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + rawArr := v.MustUint64Slice() + + replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUint64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], 
rawArr[0]) + } + +} + +func TestCollectUint64(t *testing.T) { + + v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + + collected := v.CollectUint64(func(index int, val uint64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestUintptr(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Uintptr()) + assert.Equal(t, val, New(m).Get("value").MustUintptr()) + assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr()) + assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustUintptr() + }) + +} + +func TestUintptrSlice(t *testing.T) { + + val := uintptr(1) + m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) + assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) + assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) + assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustUintptrSlice() + }) + +} + +func TestIsUintptr(t *testing.T) { + + var v *Value + + v = &Value{data: uintptr(1)} + assert.True(t, v.IsUintptr()) + + v = &Value{data: []uintptr{uintptr(1)}} + assert.True(t, v.IsUintptrSlice()) + +} + +func TestEachUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + count := 0 + replacedVals := make([]uintptr, 0) + assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) + assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) + assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) + +} + +func TestWhereUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + selected := v.WhereUintptr(func(i int, val uintptr) bool { + return i%2 == 0 + }).MustUintptrSlice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + grouped := v.GroupUintptr(func(i int, val uintptr) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]uintptr) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + rawArr := v.MustUintptrSlice() + + replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustUintptrSlice() + if 
assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectUintptr(t *testing.T) { + + v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + + collected := v.CollectUintptr(func(index int, val uintptr) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat32(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32()) + assert.Equal(t, val, New(m).Get("value").MustFloat32()) + assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) + assert.Equal(t, val, New(m).Get("nothing").Float32(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustFloat32() + }) + +} + +func TestFloat32Slice(t *testing.T) { + + val := float32(1) + m := map[string]interface{}{"value": []float32{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) + assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat32Slice() + }) + +} + +func TestIsFloat32(t *testing.T) { + + var v *Value + + v = &Value{data: float32(1)} + assert.True(t, v.IsFloat32()) + + v = &Value{data: []float32{float32(1)}} + assert.True(t, v.IsFloat32Slice()) + +} + +func TestEachFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} + count := 0 + replacedVals := make([]float32, 0) + assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) + +} + +func TestWhereFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + selected := v.WhereFloat32(func(i int, val float32) bool { + return i%2 == 0 + }).MustFloat32Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + grouped := v.GroupFloat32(func(i int, val float32) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float32) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), 
float32(1), float32(1), float32(1), float32(1), float32(1)}} + + rawArr := v.MustFloat32Slice() + + replaced := v.ReplaceFloat32(func(index int, val float32) float32 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat32Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat32(t *testing.T) { + + v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + + collected := v.CollectFloat32(func(index int, val float32) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestFloat64(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64()) + assert.Equal(t, val, New(m).Get("value").MustFloat64()) + assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) + assert.Equal(t, val, New(m).Get("nothing").Float64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustFloat64() + }) + +} + +func TestFloat64Slice(t *testing.T) { + + val := float64(1) + m := map[string]interface{}{"value": []float64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) + assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustFloat64Slice() + }) + +} + +func TestIsFloat64(t *testing.T) { + + var v *Value + + v = &Value{data: float64(1)} + assert.True(t, v.IsFloat64()) + + v = &Value{data: []float64{float64(1)}} + assert.True(t, v.IsFloat64Slice()) + +} + +func TestEachFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} + count := 0 + replacedVals := make([]float64, 0) + assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) + +} + +func TestWhereFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + selected := v.WhereFloat64(func(i int, val float64) bool { + return i%2 == 0 + }).MustFloat64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + grouped := v.GroupFloat64(func(i int, val 
float64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]float64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + rawArr := v.MustFloat64Slice() + + replaced := v.ReplaceFloat64(func(index int, val float64) float64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustFloat64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectFloat64(t *testing.T) { + + v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + + collected := v.CollectFloat64(func(index int, val float64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex64(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64()) + assert.Equal(t, val, New(m).Get("value").MustComplex64()) + assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) + assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex64() + }) + +} + +func TestComplex64Slice(t *testing.T) { + + val := complex64(1) + m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0]) + assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex64Slice() + }) + +} + +func TestIsComplex64(t *testing.T) { + + var v *Value + + v = &Value{data: complex64(1)} + assert.True(t, v.IsComplex64()) + + v = &Value{data: []complex64{complex64(1)}} + assert.True(t, v.IsComplex64Slice()) + +} + +func TestEachComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + count := 0 + replacedVals := make([]complex64, 0) + assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) + +} + +func TestWhereComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), 
complex64(1), complex64(1), complex64(1), complex64(1)}} + + selected := v.WhereComplex64(func(i int, val complex64) bool { + return i%2 == 0 + }).MustComplex64Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + grouped := v.GroupComplex64(func(i int, val complex64) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex64) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + rawArr := v.MustComplex64Slice() + + replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex64Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex64(t *testing.T) { + + v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + + collected := v.CollectComplex64(func(index int, val complex64) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func TestComplex128(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex128()) + assert.Equal(t, val, New(m).Get("value").MustComplex128()) + assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) + assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) + + assert.Panics(t, func() { + New(m).Get("age").MustComplex128() + }) + +} + +func TestComplex128Slice(t *testing.T) { + + val := complex128(1) + m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0]) + assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) + assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) + assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").MustComplex128Slice() + }) + +} + +func TestIsComplex128(t *testing.T) { + + var v *Value + + v = &Value{data: complex128(1)} + assert.True(t, v.IsComplex128()) + + v = &Value{data: []complex128{complex128(1)}} + assert.True(t, v.IsComplex128Slice()) + +} + +func TestEachComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + count := 0 + replacedVals := make([]complex128, 0) + assert.Equal(t, v, 
v.EachComplex128(func(i int, val complex128) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) + assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) + assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) + +} + +func TestWhereComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + selected := v.WhereComplex128(func(i int, val complex128) bool { + return i%2 == 0 + }).MustComplex128Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroupComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + grouped := v.GroupComplex128(func(i int, val complex128) string { + return fmt.Sprintf("%v", i%2 == 0) + }).data.(map[string][]complex128) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplaceComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + rawArr := v.MustComplex128Slice() + + replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.MustComplex128Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollectComplex128(t *testing.T) { + + v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + + collected := v.CollectComplex128(func(index int, val complex128) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/vendor/src/github.com/stretchr/objx/value.go b/vendor/src/github.com/stretchr/objx/value.go new file mode 100644 index 00000000..7aaef06b --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/value.go @@ -0,0 +1,13 @@ +package objx + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} diff --git a/vendor/src/github.com/stretchr/objx/value_test.go b/vendor/src/github.com/stretchr/objx/value_test.go new file mode 100644 index 00000000..0bc65d92 --- /dev/null +++ b/vendor/src/github.com/stretchr/objx/value_test.go @@ -0,0 +1 @@ +package objx From 446de1602cf95ca8c5b1cf05bba7fa554f929df5 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 19 Sep 2015 23:37:40 +0300 Subject: [PATCH 004/131] FROM initial implementation --- src/rocker/build2/build.go | 6 +++ src/rocker/build2/build_test.go | 27 +++++++++-- src/rocker/build2/client.go | 20 ++++++-- src/rocker/build2/commands.go | 47 +++++++++++++++++- src/rocker/build2/commands_test.go | 78 ++++++++++++++++++++++++++++++ src/rocker/build2/plan_test.go | 20 +++++++- 6 files changed, 187 insertions(+), 11 deletions(-) create mode 100644 src/rocker/build2/commands_test.go diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 53fdae2d..3a7bd913 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -24,6 +24,10 @@ import ( "github.com/fsouza/go-dockerclient" ) +var ( + NoBaseImageSpecifier = "scratch" +) + type BuildConfig struct { OutStream io.Writer InStream io.ReadCloser @@ -37,6 +41,8 @@ type Build struct { cfg BuildConfig container *docker.Config client Client + + imageID string } func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) (b *Build, err error) { diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 5e31eda3..723762d6 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -22,17 +22,19 @@ import ( "strings" "testing" + "github.com/fsouza/go-dockerclient" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) func TestNewBuild(t *testing.T) { - b := makeBuild(t, "FROM ubuntu", BuildConfig{}) + b, _ := makeBuild(t, "FROM ubuntu", BuildConfig{}) assert.IsType(t, &Rockerfile{}, b.rockerfile) } // internal helpers -func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) *Build { +func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) (*Build, *MockClient) { pc, _, _, _ := runtime.Caller(1) fn := runtime.FuncForPC(pc) @@ -41,11 +43,26 @@ func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) *Build { t.Fatal(err) } - b, err := New(&MockClient{}, r, BuildConfig{}) + c := &MockClient{} + + b, err := New(c, r, BuildConfig{}) if err != nil { t.Fatal(err) } - return b + + return b, c +} + +type MockClient struct { + mock.Mock } -type MockClient struct{} +func (m *MockClient) InspectImage(name string) (*docker.Image, error) { + args := m.Called(name) + return args.Get(0).(*docker.Image), args.Error(1) +} + +func (m *MockClient) PullImage(name string) error { + args := m.Called(name) + return args.Error(0) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 0452687f..a79c9457 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -16,17 +16,31 @@ package build2 -import "github.com/fsouza/go-dockerclient" +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) type Client interface { + InspectImage(name string) (*docker.Image, error) + PullImage(name string) error } type DockerClient struct { - Client *docker.Client + client *docker.Client } func NewDockerClient(dockerClient 
*docker.Client) *DockerClient { return &DockerClient{ - Client: dockerClient, + client: dockerClient, } } + +func (c *DockerClient) InspectImage(name string) (*docker.Image, error) { + return c.client.InspectImage(name) +} + +func (c *DockerClient) PullImage(name string) error { + return fmt.Errorf("PullImage not implemented yet") +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index f19bd20c..a2387441 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -16,7 +16,11 @@ package build2 -import "fmt" +import ( + "fmt" + + "github.com/fsouza/go-dockerclient" +) type ConfigCommand struct { name string @@ -41,6 +45,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandEnv{cfg}, nil case "tag": return &CommandTag{cfg}, nil + case "copy": + return &CommandCopy{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -49,7 +55,36 @@ type CommandFrom struct { cfg ConfigCommand } -func (c *CommandFrom) Execute(b *Build) error { +func (c *CommandFrom) Execute(b *Build) (err error) { + // TODO: for "scratch" image we may use /images/create + + if len(c.cfg.args) != 1 { + return fmt.Errorf("FROM requires one argument") + } + + var ( + img *docker.Image + name = c.cfg.args[0] + ) + + if img, err = b.client.InspectImage(name); err != nil { + return err + } + + if img == nil { + if err = b.client.PullImage(name); err != nil { + return err + } + if img, err = b.client.InspectImage(name); err != nil { + return err + } + if img == nil { + return fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) + } + } + + b.imageID = img.ID + return nil } @@ -88,3 +123,11 @@ type CommandTag struct { func (c *CommandTag) Execute(b *Build) error { return nil } + +type CommandCopy struct { + cfg ConfigCommand +} + +func (c *CommandCopy) Execute(b *Build) error { + return nil +} diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go new file mode 100644 index 00000000..8478b9a0 --- /dev/null +++ b/src/rocker/build2/commands_test.go @@ -0,0 +1,78 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "testing" + + "github.com/fsouza/go-dockerclient" + "github.com/stretchr/testify/assert" +) + +func TestCommandFrom_Existing(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"existing"}, + }} + + c.On("InspectImage", "existing").Return(&docker.Image{ID: "123"}, nil) + + err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "123", b.imageID) +} + +func TestCommandFrom_NotExisting(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"not-existing"}, + }} + + var img *docker.Image + + c.On("InspectImage", "not-existing").Return(img, nil).Once() + c.On("PullImage", "not-existing").Return(nil).Once() + c.On("InspectImage", "not-existing").Return(&docker.Image{ID: "123"}, nil).Once() + + err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "123", b.imageID) +} + +func TestCommandFrom_AfterPullNotExisting(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"not-existing"}, + }} + + var nilImg *docker.Image + + c.On("InspectImage", "not-existing").Return(nilImg, nil).Twice() + c.On("PullImage", "not-existing").Return(nil).Once() + + err := cmd.Execute(b) + c.AssertExpectations(t) + assert.Equal(t, "FROM: Failed to inspect image after pull: not-existing", err.Error()) +} diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build2/plan_test.go index 6eaec3c1..4b643d25 100644 --- a/src/rocker/build2/plan_test.go +++ b/src/rocker/build2/plan_test.go @@ -256,10 +256,28 @@ TAG my-build } } +func TestPlan_Scratch(t *testing.T) { + p := makePlan(t, ` +FROM scratch +COPY rootfs / +`) + + expected := []Command{ + &CommandFrom{}, + &CommandCopy{}, + &CommandCommit{}, + } + + assert.Len(t, p, len(expected)) + for i, c := range expected { + assert.IsType(t, c, p[i]) + } +} + // internal helpers func makePlan(t *testing.T, rockerfileContent string) Plan { - b := makeBuild(t, rockerfileContent, BuildConfig{}) + b, _ := makeBuild(t, rockerfileContent, BuildConfig{}) p, err := NewPlan(b) if err != nil { From 57d993a45d3c81019e6522ac2a171df7b39fa244 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 19 Sep 2015 23:51:47 +0300 Subject: [PATCH 005/131] FROM save container cfg and support --pull --- src/rocker/build2/build.go | 1 + src/rocker/build2/build_test.go | 2 +- src/rocker/build2/commands.go | 8 ++++-- src/rocker/build2/commands_test.go | 40 +++++++++++++++++++++++++++--- 4 files changed, 45 insertions(+), 6 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 3a7bd913..f9e30c37 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -34,6 +34,7 @@ type BuildConfig struct { Auth *docker.AuthConfiguration Vars template.Vars ContextDir string + Pull bool } type Build struct { diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 723762d6..6eebae8d 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -45,7 +45,7 @@ func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) (*Build, c := &MockClient{} - b, err := New(c, r, BuildConfig{}) + b, err := New(c, r, cfg) if err != nil { t.Fatal(err) } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index a2387441..2f20d8b3 100644 --- a/src/rocker/build2/commands.go +++ 
b/src/rocker/build2/commands.go @@ -67,8 +67,11 @@ func (c *CommandFrom) Execute(b *Build) (err error) { name = c.cfg.args[0] ) - if img, err = b.client.InspectImage(name); err != nil { - return err + // If Pull is true, then img will remain nil and it will be pulled below + if !b.cfg.Pull { + if img, err = b.client.InspectImage(name); err != nil { + return err + } } if img == nil { @@ -84,6 +87,7 @@ func (c *CommandFrom) Execute(b *Build) (err error) { } b.imageID = img.ID + b.container = img.Config return nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 8478b9a0..4ac81216 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -29,7 +29,14 @@ func TestCommandFrom_Existing(t *testing.T) { args: []string{"existing"}, }} - c.On("InspectImage", "existing").Return(&docker.Image{ID: "123"}, nil) + img := &docker.Image{ + ID: "123", + Config: &docker.Config{ + Hostname: "localhost", + }, + } + + c.On("InspectImage", "existing").Return(img, nil).Once() err := cmd.Execute(b) if err != nil { @@ -38,6 +45,33 @@ func TestCommandFrom_Existing(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, "123", b.imageID) + assert.Equal(t, "localhost", b.container.Hostname) +} + +func TestCommandFrom_PullExisting(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{Pull: true}) + cmd := &CommandFrom{ConfigCommand{ + args: []string{"existing"}, + }} + + img := &docker.Image{ + ID: "123", + Config: &docker.Config{ + Hostname: "localhost", + }, + } + + c.On("PullImage", "existing").Return(nil).Once() + c.On("InspectImage", "existing").Return(img, nil).Once() + + err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, "123", b.imageID) + assert.Equal(t, "localhost", b.container.Hostname) } func TestCommandFrom_NotExisting(t *testing.T) { @@ -46,9 +80,9 @@ func TestCommandFrom_NotExisting(t *testing.T) { args: []string{"not-existing"}, }} - var img *docker.Image + var nilImg *docker.Image - c.On("InspectImage", "not-existing").Return(img, nil).Once() + c.On("InspectImage", "not-existing").Return(nilImg, nil).Once() c.On("PullImage", "not-existing").Return(nil).Once() c.On("InspectImage", "not-existing").Return(&docker.Image{ID: "123"}, nil).Once() From 4687f5021f72714205d39088f4b3124ecaeff71a Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 00:15:08 +0300 Subject: [PATCH 006/131] readme: note about v1 --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index e53380d2..ff91ea8b 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,9 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are missing while keeping Docker’s original design and idea. Read the [blog post](http://tech.grammarly.com/blog/posts/Making-Docker-Rock-at-Grammarly.html) about how and why it was invented. +# *NOTE on v1 branch* +In this branch we are developing the new experimental implementation of Rocker that will be completely client-side driven, with no fallback on `docker build`. This means faster builds and more power. No build context uploads anymore. Also, the builder code is completely rewritten and made much more testable and extensible in the future. Caching might be also rethought. Cross-server builds determinism is our dream. 
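Stepping back to patch 005 for a moment: the `--pull` handling it adds to `CommandFrom` boils down to a small decision flow, namely skip the local inspect when `--pull` is set, pull on a miss, then inspect again and fail if the image still cannot be found. The sketch below is only a condensed restatement of that flow for reference; `resolveFromImage` is not a helper that exists in the series, and the real logic stays inline in `CommandFrom.Execute`.

    // Condensed, illustrative restatement of the FROM resolution flow (not part of the patches).
    func resolveFromImage(client Client, name string, forcePull bool) (*docker.Image, error) {
        var (
            img *docker.Image
            err error
        )
        if !forcePull {
            // Without --pull, a locally available image is used as-is.
            if img, err = client.InspectImage(name); err != nil {
                return nil, err
            }
        }
        if img == nil {
            // Either --pull was given or the image is not present locally.
            if err = client.PullImage(name); err != nil {
                return nil, err
            }
            if img, err = client.InspectImage(name); err != nil {
                return nil, err
            }
            if img == nil {
                return nil, fmt.Errorf("failed to inspect image after pull: %s", name)
            }
        }
        return img, nil
    }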
+ * [Installation](#installation) * [Rockerfile](#rockerfile) * [MOUNT](#mount) From 40245ae224f3ebf8f93385ca6d99141f9113c416 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 00:16:48 +0300 Subject: [PATCH 007/131] readme: v1 note separator --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index ff91ea8b..cd10514e 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,8 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are m # *NOTE on v1 branch* In this branch we are developing the new experimental implementation of Rocker that will be completely client-side driven, with no fallback on `docker build`. This means faster builds and more power. No build context uploads anymore. Also, the builder code is completely rewritten and made much more testable and extensible in the future. Caching might be also rethought. Cross-server builds determinism is our dream. +--- + * [Installation](#installation) * [Rockerfile](#rockerfile) * [MOUNT](#mount) From e10e1b992b88a489064fc7c311b84eb3382f0877 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 00:38:37 +0300 Subject: [PATCH 008/131] pass immutable state between commands --- src/rocker/build2/build.go | 14 ++++++--- src/rocker/build2/commands.go | 49 ++++++++++++++++-------------- src/rocker/build2/commands_test.go | 25 +++++++++------ 3 files changed, 51 insertions(+), 37 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index f9e30c37..455a9e37 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -37,13 +37,16 @@ type BuildConfig struct { Pull bool } +type State struct { + container docker.Config + imageID string +} + type Build struct { rockerfile *Rockerfile cfg BuildConfig - container *docker.Config client Client - - imageID string + state State } func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) (b *Build, err error) { @@ -51,15 +54,16 @@ func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) (b *Build, err rockerfile: rockerfile, cfg: cfg, client: client, + state: State{}, } return b, nil } -func (b *Build) Run(plan Plan) error { +func (b *Build) Run(plan Plan) (err error) { for k, c := range plan { fmt.Printf("Step %d: %q\n", k, c) - if err := c.Execute(b); err != nil { + if b.state, err = c.Execute(b); err != nil { return err } } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 2f20d8b3..cbee0786 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -31,7 +31,11 @@ type ConfigCommand struct { } type Command interface { - Execute(b *Build) error + // Execute does the command execution and returns modified state. + // Note that here we use State not by reference because we want + // it to be immutable. In future, it may encoded/decoded from json + // and passed to the external command implementations. 
+ Execute(b *Build) (State, error) } func NewCommand(cfg ConfigCommand) (Command, error) { @@ -55,11 +59,11 @@ type CommandFrom struct { cfg ConfigCommand } -func (c *CommandFrom) Execute(b *Build) (err error) { +func (c *CommandFrom) Execute(b *Build) (state State, err error) { // TODO: for "scratch" image we may use /images/create if len(c.cfg.args) != 1 { - return fmt.Errorf("FROM requires one argument") + return state, fmt.Errorf("FROM requires one argument") } var ( @@ -70,68 +74,69 @@ func (c *CommandFrom) Execute(b *Build) (err error) { // If Pull is true, then img will remain nil and it will be pulled below if !b.cfg.Pull { if img, err = b.client.InspectImage(name); err != nil { - return err + return state, err } } if img == nil { if err = b.client.PullImage(name); err != nil { - return err + return state, err } if img, err = b.client.InspectImage(name); err != nil { - return err + return state, err } if img == nil { - return fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) + return state, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) } } - b.imageID = img.ID - b.container = img.Config + state = b.state + state.imageID = img.ID + state.container = *img.Config - return nil + return state, nil } type CommandReset struct{} -func (c *CommandReset) Execute(b *Build) error { - return nil +func (c *CommandReset) Execute(b *Build) (State, error) { + return b.state, nil } type CommandCommit struct{} -func (c *CommandCommit) Execute(b *Build) error { - return nil +func (c *CommandCommit) Execute(b *Build) (State, error) { + return b.state, nil } type CommandRun struct { cfg ConfigCommand } -func (c *CommandRun) Execute(b *Build) error { - return nil +func (c *CommandRun) Execute(b *Build) (State, error) { + return b.state, nil } type CommandEnv struct { cfg ConfigCommand } -func (c *CommandEnv) Execute(b *Build) error { - return nil +func (c *CommandEnv) Execute(b *Build) (State, error) { + return b.state, nil } type CommandTag struct { cfg ConfigCommand } -func (c *CommandTag) Execute(b *Build) error { - return nil +func (c *CommandTag) Execute(b *Build) (State, error) { + return b.state, nil } type CommandCopy struct { cfg ConfigCommand } -func (c *CommandCopy) Execute(b *Build) error { - return nil +func (c *CommandCopy) Execute(b *Build) (State, error) { + return b.state, nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 4ac81216..3ded4958 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -38,14 +38,14 @@ func TestCommandFrom_Existing(t *testing.T) { c.On("InspectImage", "existing").Return(img, nil).Once() - err := cmd.Execute(b) + state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } c.AssertExpectations(t) - assert.Equal(t, "123", b.imageID) - assert.Equal(t, "localhost", b.container.Hostname) + assert.Equal(t, "123", state.imageID) + assert.Equal(t, "localhost", state.container.Hostname) } func TestCommandFrom_PullExisting(t *testing.T) { @@ -64,14 +64,14 @@ func TestCommandFrom_PullExisting(t *testing.T) { c.On("PullImage", "existing").Return(nil).Once() c.On("InspectImage", "existing").Return(img, nil).Once() - err := cmd.Execute(b) + state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } c.AssertExpectations(t) - assert.Equal(t, "123", b.imageID) - assert.Equal(t, "localhost", b.container.Hostname) + assert.Equal(t, "123", state.imageID) + assert.Equal(t, "localhost", state.container.Hostname) } func TestCommandFrom_NotExisting(t *testing.T) { @@ 
-82,17 +82,22 @@ func TestCommandFrom_NotExisting(t *testing.T) { var nilImg *docker.Image + img := &docker.Image{ + ID: "123", + Config: &docker.Config{}, + } + c.On("InspectImage", "not-existing").Return(nilImg, nil).Once() c.On("PullImage", "not-existing").Return(nil).Once() - c.On("InspectImage", "not-existing").Return(&docker.Image{ID: "123"}, nil).Once() + c.On("InspectImage", "not-existing").Return(img, nil).Once() - err := cmd.Execute(b) + state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } c.AssertExpectations(t) - assert.Equal(t, "123", b.imageID) + assert.Equal(t, "123", state.imageID) } func TestCommandFrom_AfterPullNotExisting(t *testing.T) { @@ -106,7 +111,7 @@ func TestCommandFrom_AfterPullNotExisting(t *testing.T) { c.On("InspectImage", "not-existing").Return(nilImg, nil).Twice() c.On("PullImage", "not-existing").Return(nil).Once() - err := cmd.Execute(b) + _, err := cmd.Execute(b) c.AssertExpectations(t) assert.Equal(t, "FROM: Failed to inspect image after pull: not-existing", err.Error()) } From ff3037c865309e90711ed92c7cd284fd609077d4 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 10:37:35 +0300 Subject: [PATCH 009/131] implemented real execution of FROM, refactored commands --- src/rocker/build2/build.go | 16 +++++----- src/rocker/build2/client.go | 59 +++++++++++++++++++++++++++++++++-- src/rocker/build2/commands.go | 42 ++++++++++++++++++++++++- src/rocker/build2/plan.go | 4 +-- 4 files changed, 107 insertions(+), 14 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 455a9e37..7455931b 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -19,7 +19,6 @@ package build2 import ( "fmt" "io" - "rocker/template" "github.com/fsouza/go-dockerclient" ) @@ -31,8 +30,6 @@ var ( type BuildConfig struct { OutStream io.Writer InStream io.ReadCloser - Auth *docker.AuthConfiguration - Vars template.Vars ContextDir string Pull bool } @@ -49,23 +46,26 @@ type Build struct { state State } -func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) (b *Build, err error) { - b = &Build{ +func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) *Build { + return &Build{ rockerfile: rockerfile, cfg: cfg, client: client, state: State{}, } - - return b, nil } func (b *Build) Run(plan Plan) (err error) { for k, c := range plan { - fmt.Printf("Step %d: %q\n", k, c) + // fmt.Printf("Step %d: %# v\n", k+1, pretty.Formatter(c)) + fmt.Printf("Step %d: %s\n", k+1, c) if b.state, err = c.Execute(b); err != nil { return err } } return nil } + +func (b *Build) GetState() State { + return b.state +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index a79c9457..bdc51c6d 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -18,7 +18,11 @@ package build2 import ( "fmt" + "io" + "rocker/imagename" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/term" "github.com/fsouza/go-dockerclient" ) @@ -27,20 +31,69 @@ type Client interface { PullImage(name string) error } +type DockerClientConfig struct { + Client *docker.Client + OutStream io.Writer + InStream io.ReadCloser + Auth *docker.AuthConfiguration +} + type DockerClient struct { client *docker.Client + cfg DockerClientConfig } -func NewDockerClient(dockerClient *docker.Client) *DockerClient { +func NewDockerClient(dockerClient *docker.Client, cfg DockerClientConfig) *DockerClient { return &DockerClient{ client: dockerClient, + cfg: cfg, } } func (c *DockerClient) 
InspectImage(name string) (*docker.Image, error) { - return c.client.InspectImage(name) + img, err := c.client.InspectImage(name) + // We simply return nil in case image not found + if err == docker.ErrNoSuchImage { + return nil, nil + } + return img, err } func (c *DockerClient) PullImage(name string) error { - return fmt.Errorf("PullImage not implemented yet") + + var ( + fdOut, isTerminalOut = term.GetFdInfo(c.cfg.OutStream) + image = imagename.NewFromString(name) + pipeReader, pipeWriter = io.Pipe() + errch = make(chan error) + ) + + pullOpts := docker.PullImageOptions{ + Repository: image.NameWithRegistry(), + Registry: image.Registry, + Tag: image.GetTag(), + OutputStream: pipeWriter, + RawJSONStream: true, + } + + go func() { + err := c.client.PullImage(pullOpts, *c.cfg.Auth) + + if err := pipeWriter.Close(); err != nil { + // TODO: logrus error + fmt.Printf("pipeWriter.Close() err: %s\n", err) + } + + errch <- err + }() + + if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, c.cfg.OutStream, fdOut, isTerminalOut); err != nil { + return fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) + } + + if err := <-errch; err != nil { + return fmt.Errorf("Failed to pull image: %s, error: %s", image, err) + } + + return nil } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index cbee0786..568d3aef 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -36,6 +36,9 @@ type Command interface { // it to be immutable. In future, it may encoded/decoded from json // and passed to the external command implementations. Execute(b *Build) (State, error) + + // String returns the human readable string representation of the command + String() string } func NewCommand(cfg ConfigCommand) (Command, error) { @@ -55,10 +58,15 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return nil, fmt.Errorf("Unknown command: %s", cfg.name) } +// CommandFrom implements FROM type CommandFrom struct { cfg ConfigCommand } +func (c *CommandFrom) String() string { + return c.cfg.original +} + func (c *CommandFrom) Execute(b *Build) (state State, err error) { // TODO: for "scratch" image we may use /images/create @@ -97,46 +105,78 @@ func (c *CommandFrom) Execute(b *Build) (state State, err error) { return state, nil } +// CommandReset cleans the builder state before the next FROM type CommandReset struct{} +func (c *CommandReset) String() string { + return "Cleaning up state before the next FROM" +} + func (c *CommandReset) Execute(b *Build) (State, error) { - return b.state, nil + state := b.state + state.imageID = "" + return state, nil } +// CommandCommit commits collected changes type CommandCommit struct{} +func (c *CommandCommit) String() string { + return "Committing changes" +} + func (c *CommandCommit) Execute(b *Build) (State, error) { return b.state, nil } +// CommandRun implements RUN type CommandRun struct { cfg ConfigCommand } +func (c *CommandRun) String() string { + return c.cfg.original +} + func (c *CommandRun) Execute(b *Build) (State, error) { return b.state, nil } +// CommandEnv implements ENV type CommandEnv struct { cfg ConfigCommand } +func (c *CommandEnv) String() string { + return c.cfg.original +} + func (c *CommandEnv) Execute(b *Build) (State, error) { return b.state, nil } +// CommandTag implements TAG type CommandTag struct { cfg ConfigCommand } +func (c *CommandTag) String() string { + return c.cfg.original +} + func (c *CommandTag) Execute(b *Build) (State, error) { return b.state, nil } +// 
CommandCopy implements COPY type CommandCopy struct { cfg ConfigCommand } +func (c *CommandCopy) String() string { + return c.cfg.original +} + func (c *CommandCopy) Execute(b *Build) (State, error) { return b.state, nil } diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index b1216073..fd92442d 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -54,7 +54,7 @@ func NewPlan(b *Build) (plan Plan, err error) { } } - // Commit before commands that require our state + // Commit before commands that require state if strings.Contains(alwaysCommitBefore, cfg.name) && !committed { commit() } @@ -70,7 +70,7 @@ func NewPlan(b *Build) (plan Plan, err error) { committed = false // If we reached the end of Rockerfile, do the final commit - // As you noticed, the final commit will not happen in the last + // As you noticed, the final commit will not happen if the last // command was TAG, PUSH or FROM if i == len(commands)-1 { commit() From 209ae3af7e755c06fd81d170a3e9144e315503f1 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 10:37:44 +0300 Subject: [PATCH 010/131] main now executes build2 --- src/cmd/rocker/main.go | 155 ++++++++++++++++++++++++++--------------- 1 file changed, 98 insertions(+), 57 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 18c0a4e5..5be8d345 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -26,13 +26,14 @@ import ( "strings" "rocker/build" + "rocker/build2" "rocker/dockerclient" - "rocker/git" "rocker/imagename" "rocker/template" "github.com/codegangsta/cli" "github.com/fsouza/go-dockerclient" + "github.com/kr/pretty" ) var ( @@ -163,62 +164,79 @@ func main() { } func buildCommand(c *cli.Context) { - configFilename := c.String("file") - wd, err := os.Getwd() + var ( + rockerfile *build2.Rockerfile + err error + ) + + cliVars, err := template.VarsFromStrings(c.StringSlice("var")) if err != nil { log.Fatal(err) } - if !filepath.IsAbs(configFilename) { - configFilename = filepath.Clean(path.Join(wd, configFilename)) - } + vars := template.Vars{}.Merge(cliVars) - // we do not want to outpu anything if "print" was asked - // TODO: find a more clean way to suppress output - if !c.Bool("print") { - fmt.Printf("[Rocker] Building...\n") - } + // obtain git info about current directory + // gitInfo, err := git.Info(filepath.Dir(configFilename)) + // if err != nil { + // // Ignore if given directory is not a git repo + // if _, ok := err.(*git.ErrNotGitRepo); !ok { + // log.Fatal(err) + // } + // } + + // // some additional useful vars + // vars["commit"] = stringOr(os.Getenv("GIT_COMMIT"), gitInfo.Sha) + // vars["branch"] = stringOr(os.Getenv("GIT_BRANCH"), gitInfo.Branch) + // vars["git_url"] = stringOr(os.Getenv("GIT_URL"), gitInfo.URL) + // vars["commit_message"] = gitInfo.Message + // vars["commit_author"] = gitInfo.Author - dockerClient, err := dockerclient.NewFromCli(c) + wd, err := os.Getwd() if err != nil { log.Fatal(err) } - // Initialize context dir - args := c.Args() - contextDir := filepath.Dir(configFilename) - if len(args) > 0 { - if filepath.IsAbs(args[0]) { + configFilename := c.String("file") + contextDir := wd + + if configFilename == "-" { + + rockerfile, err = build2.NewRockerfile(path.Base(wd), os.Stdin, vars, template.Funs{}) + if err != nil { + log.Fatal(err) + } + + } else { + + if !filepath.IsAbs(configFilename) { + configFilename = path.Join(wd, configFilename) + } + + rockerfile, err = build2.NewRockerfileFromFile(configFilename, vars, template.Funs{}) 
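For orientation, the two Rockerfile sources handled above ("-" meaning stdin, otherwise a path resolved against the working directory) can be summarized as below. This is an illustrative sketch that assumes the same imports as main.go; `loadRockerfile` is not a function in the series, and unlike the hunk above it also checks the error returned by `NewRockerfileFromFile`, which the patch currently drops.

    // Illustrative sketch only, assuming main.go's imports (os, path, path/filepath, fmt, cli, build2, template).
    func loadRockerfile(c *cli.Context, wd string, vars template.Vars) (*build2.Rockerfile, error) {
        filename := c.String("file")
        if filename == "-" {
            // The Rockerfile template is read from stdin.
            return build2.NewRockerfile(path.Base(wd), os.Stdin, vars, template.Funs{})
        }
        if !filepath.IsAbs(filename) {
            filename = path.Join(wd, filename)
        }
        rockerfile, err := build2.NewRockerfileFromFile(filename, vars, template.Funs{})
        if err != nil {
            return nil, fmt.Errorf("Failed to process Rockerfile %s, error: %s", filename, err)
        }
        return rockerfile, nil
    }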
+ + // Initialize context dir + contextDir = filepath.Dir(configFilename) + args := c.Args() + if len(args) > 0 { contextDir = args[0] - } else { - contextDir = filepath.Clean(path.Join(wd, args[0])) + if !filepath.IsAbs(contextDir) { + contextDir = path.Join(wd, args[0]) + } } } - cliVars, err := template.VarsFromStrings(c.StringSlice("var")) - if err != nil { - log.Fatal(err) + if c.Bool("print") { + fmt.Print(rockerfile.Content) + os.Exit(0) } - vars := template.Vars{}.Merge(cliVars) - - // obtain git info about current directory - gitInfo, err := git.Info(filepath.Dir(configFilename)) + dockerClient, err := dockerclient.NewFromCli(c) if err != nil { - // Ignore if given directory is not a git repo - if _, ok := err.(*git.ErrNotGitRepo); !ok { - log.Fatal(err) - } + log.Fatal(err) } - // some additional useful vars - vars["commit"] = stringOr(os.Getenv("GIT_COMMIT"), gitInfo.Sha) - vars["branch"] = stringOr(os.Getenv("GIT_BRANCH"), gitInfo.Branch) - vars["git_url"] = stringOr(os.Getenv("GIT_URL"), gitInfo.URL) - vars["commit_message"] = gitInfo.Message - vars["commit_author"] = gitInfo.Author - auth := &docker.AuthConfiguration{} authParam := c.String("auth") if strings.Contains(authParam, ":") { @@ -227,30 +245,53 @@ func buildCommand(c *cli.Context) { auth.Password = userPass[1] } - builder := build.Builder{ - Rockerfile: configFilename, - ContextDir: contextDir, - UtilizeCache: !c.Bool("no-cache"), - Push: c.Bool("push"), - NoReuse: c.Bool("no-reuse"), - Verbose: c.Bool("verbose"), - Attach: c.Bool("attach"), - Print: c.Bool("print"), - Auth: auth, - Vars: vars, - CliVars: cliVars, - InStream: os.Stdin, - OutStream: os.Stdout, - Docker: dockerClient, - AddMeta: c.Bool("meta"), - Pull: c.Bool("pull"), - ID: c.String("id"), - ArtifactsPath: c.String("artifacts-path"), + client := build2.NewDockerClient(dockerClient, build2.DockerClientConfig{ + InStream: os.Stdin, + OutStream: os.Stdout, + Auth: auth, + }) + + builder := build2.New(client, rockerfile, build2.BuildConfig{ + InStream: os.Stdin, + OutStream: os.Stdout, + ContextDir: contextDir, + Pull: c.Bool("pull"), + }) + + plan, err := build2.NewPlan(builder) + if err != nil { + log.Fatal(err) } - if _, err := builder.Build(); err != nil { + if err := builder.Run(plan); err != nil { log.Fatal(err) } + + pretty.Println(builder.GetState()) + + // builder := build.Builder{ + // Rockerfile: configFilename, + // ContextDir: contextDir, + // UtilizeCache: !c.Bool("no-cache"), + // Push: c.Bool("push"), + // NoReuse: c.Bool("no-reuse"), + // Verbose: c.Bool("verbose"), + // Attach: c.Bool("attach"), + // Print: c.Bool("print"), + // Auth: auth, + // Vars: vars, + // CliVars: cliVars, + // InStream: os.Stdin, + // OutStream: os.Stdout, + // Docker: dockerClient, + // AddMeta: c.Bool("meta"), + // Pull: c.Bool("pull"), + // ID: c.String("id"), + // } + + // if _, err := builder.Build(); err != nil { + // log.Fatal(err) + // } } func showCommand(c *cli.Context) { From 85e3946e441db190f5f15c8f8ca2c1cd90f5e97a Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 10:52:34 +0300 Subject: [PATCH 011/131] integrate logrus --- src/cmd/rocker/main.go | 37 +++++++++++++++++++++++++------------ src/rocker/build2/build.go | 12 +++++++++--- src/rocker/build2/client.go | 30 +++++++++++++++--------------- 3 files changed, 49 insertions(+), 30 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 5be8d345..fc9f2fd7 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -19,7 +19,6 @@ package main import ( 
"encoding/json" "fmt" - "log" "os" "path" "path/filepath" @@ -33,7 +32,8 @@ import ( "github.com/codegangsta/cli" "github.com/fsouza/go-dockerclient" - "github.com/kr/pretty" + + log "github.com/Sirupsen/logrus" ) var ( @@ -50,6 +50,11 @@ var ( BuildTime = "none" ) +func init() { + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) +} + func main() { app := cli.NewApp() @@ -67,8 +72,10 @@ func main() { app.Flags = append([]cli.Flag{ cli.BoolFlag{ - Name: "verbose", - Usage: "enables verbose output", + Name: "verbose, vv", + }, + cli.BoolFlag{ + Name: "json", }, }, dockerclient.GlobalCliParams()...) @@ -170,6 +177,8 @@ func buildCommand(c *cli.Context) { err error ) + initLogs(c) + cliVars, err := template.VarsFromStrings(c.StringSlice("var")) if err != nil { log.Fatal(err) @@ -237,7 +246,7 @@ func buildCommand(c *cli.Context) { log.Fatal(err) } - auth := &docker.AuthConfiguration{} + auth := docker.AuthConfiguration{} authParam := c.String("auth") if strings.Contains(authParam, ":") { userPass := strings.Split(authParam, ":") @@ -245,11 +254,7 @@ func buildCommand(c *cli.Context) { auth.Password = userPass[1] } - client := build2.NewDockerClient(dockerClient, build2.DockerClientConfig{ - InStream: os.Stdin, - OutStream: os.Stdout, - Auth: auth, - }) + client := build2.NewDockerClient(dockerClient, auth) builder := build2.New(client, rockerfile, build2.BuildConfig{ InStream: os.Stdin, @@ -267,8 +272,6 @@ func buildCommand(c *cli.Context) { log.Fatal(err) } - pretty.Println(builder.GetState()) - // builder := build.Builder{ // Rockerfile: configFilename, // ContextDir: contextDir, @@ -390,6 +393,16 @@ func cleanCommand(c *cli.Context) { fmt.Println(verbose) } +func initLogs(ctx *cli.Context) { + if ctx.GlobalBool("verbose") { + log.SetLevel(log.DebugLevel) + } + + if ctx.GlobalBool("json") { + log.SetFormatter(&log.JSONFormatter{}) + } +} + func stringOr(args ...string) string { for _, str := range args { if str != "" { diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 7455931b..33ba11ac 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -17,10 +17,12 @@ package build2 import ( - "fmt" "io" "github.com/fsouza/go-dockerclient" + "github.com/kr/pretty" + + log "github.com/Sirupsen/logrus" ) var ( @@ -57,11 +59,15 @@ func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) *Build { func (b *Build) Run(plan Plan) (err error) { for k, c := range plan { - // fmt.Printf("Step %d: %# v\n", k+1, pretty.Formatter(c)) - fmt.Printf("Step %d: %s\n", k+1, c) + + log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) + log.Infof("Step %d: %s", k+1, c) + if b.state, err = c.Execute(b); err != nil { return err } + + log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state)) } return nil } diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index bdc51c6d..0c2c3d9d 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -24,6 +24,8 @@ import ( "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/term" "github.com/fsouza/go-dockerclient" + + log "github.com/Sirupsen/logrus" ) type Client interface { @@ -31,22 +33,15 @@ type Client interface { PullImage(name string) error } -type DockerClientConfig struct { - Client *docker.Client - OutStream io.Writer - InStream io.ReadCloser - Auth *docker.AuthConfiguration -} - type DockerClient struct { client *docker.Client - cfg DockerClientConfig + auth docker.AuthConfiguration } -func NewDockerClient(dockerClient *docker.Client, cfg 
DockerClientConfig) *DockerClient { +func NewDockerClient(dockerClient *docker.Client, auth docker.AuthConfiguration) *DockerClient { return &DockerClient{ client: dockerClient, - cfg: cfg, + auth: auth, } } @@ -62,12 +57,18 @@ func (c *DockerClient) InspectImage(name string) (*docker.Image, error) { func (c *DockerClient) PullImage(name string) error { var ( - fdOut, isTerminalOut = term.GetFdInfo(c.cfg.OutStream) image = imagename.NewFromString(name) pipeReader, pipeWriter = io.Pipe() + def = log.StandardLogger() + fdOut, isTerminalOut = term.GetFdInfo(def.Out) + out = def.Out errch = make(chan error) ) + if !isTerminalOut { + out = def.Writer() + } + pullOpts := docker.PullImageOptions{ Repository: image.NameWithRegistry(), Registry: image.Registry, @@ -77,17 +78,16 @@ func (c *DockerClient) PullImage(name string) error { } go func() { - err := c.client.PullImage(pullOpts, *c.cfg.Auth) + err := c.client.PullImage(pullOpts, c.auth) if err := pipeWriter.Close(); err != nil { - // TODO: logrus error - fmt.Printf("pipeWriter.Close() err: %s\n", err) + log.Errorf("pipeWriter.Close() err: %s\n", err) } errch <- err }() - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, c.cfg.OutStream, fdOut, isTerminalOut); err != nil { + if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut); err != nil { return fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) } From 93ccb14dbde88640adad2a19b02818e985179c64 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 10:59:11 +0300 Subject: [PATCH 012/131] fix tests --- src/rocker/build2/build_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 6eebae8d..6906c9f1 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -44,11 +44,7 @@ func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) (*Build, } c := &MockClient{} - - b, err := New(c, r, cfg) - if err != nil { - t.Fatal(err) - } + b := New(c, r, cfg) return b, c } From 74baf09834a6087d08706dbd0d53264fe33ea3b8 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 13:12:28 +0300 Subject: [PATCH 013/131] draft RUN implementation --- src/rocker/build2/build.go | 6 ++-- src/rocker/build2/build_test.go | 10 +++++++ src/rocker/build2/client.go | 34 +++++++++++++++++++++- src/rocker/build2/commands.go | 36 ++++++++++++++++++++++-- src/rocker/build2/commands_test.go | 45 ++++++++++++++++++++++++++++++ src/rocker/build2/rockerfile.go | 13 +++++++++ 6 files changed, 139 insertions(+), 5 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 33ba11ac..0553ac47 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -37,8 +37,10 @@ type BuildConfig struct { } type State struct { - container docker.Config - imageID string + container docker.Config + imageID string + containerID string + postCommit func(s State) (s1 State, err error) } type Build struct { diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 6906c9f1..4620b193 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -62,3 +62,13 @@ func (m *MockClient) PullImage(name string) error { args := m.Called(name) return args.Error(0) } + +func (m *MockClient) CreateContainer(state State) (string, error) { + args := m.Called(state) + return args.String(0), args.Error(1) +} + +func (m *MockClient) 
RunContainer(containerID string, attach bool) error { + args := m.Called(containerID, attach) + return args.Error(0) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 0c2c3d9d..3ac7cfa0 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -31,6 +31,8 @@ import ( type Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error + CreateContainer(state State) (id string, err error) + RunContainer(containerID string, attach bool) error } type DockerClient struct { @@ -81,7 +83,7 @@ func (c *DockerClient) PullImage(name string) error { err := c.client.PullImage(pullOpts, c.auth) if err := pipeWriter.Close(); err != nil { - log.Errorf("pipeWriter.Close() err: %s\n", err) + log.Errorf("pipeWriter.Close() err: %s", err) } errch <- err @@ -97,3 +99,33 @@ func (c *DockerClient) PullImage(name string) error { return nil } + +func (c *DockerClient) CreateContainer(state State) (string, error) { + // volumesFrom := builder.getMountContainerIds() + // binds := builder.getBinds() + + state.container.Image = state.imageID + + // TODO: assign human readable name? + + opts := docker.CreateContainerOptions{ + Config: &state.container, + HostConfig: &docker.HostConfig{ + // Binds: binds, + // VolumesFrom: volumesFrom, + }, + } + + container, err := c.client.CreateContainer(opts) + if err != nil { + return "", err + } + + log.Infof(" ---> Created container %.12s (image id = %.12s)", container.ID, state.imageID) + + return container.ID, nil +} + +func (c *DockerClient) RunContainer(containerID string, attach bool) error { + return fmt.Errorf("RunContainer not implemented yet") +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 568d3aef..f545c5a3 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -138,8 +138,40 @@ func (c *CommandRun) String() string { return c.cfg.original } -func (c *CommandRun) Execute(b *Build) (State, error) { - return b.state, nil +func (c *CommandRun) Execute(b *Build) (s State, err error) { + s = b.state + + if s.imageID == "" { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") + } + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
+ } + + // TODO: test with ENTRYPOINT + + // We run this command in the container using CMD + origCmd := s.container.Cmd + s.container.Cmd = cmd + + // Restore command after commit + s.postCommit = func(s State) (State, error) { + s.container.Cmd = origCmd + return s, nil + } + + if s.containerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + if err = b.client.RunContainer(s.containerID, false); err != nil { + return s, err + } + + return s, nil } // CommandEnv implements ENV diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 3ded4958..4710ff24 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -19,10 +19,14 @@ package build2 import ( "testing" + "github.com/stretchr/testify/mock" + "github.com/fsouza/go-dockerclient" "github.com/stretchr/testify/assert" ) +// =========== Testing FROM =========== + func TestCommandFrom_Existing(t *testing.T) { b, c := makeBuild(t, "", BuildConfig{}) cmd := &CommandFrom{ConfigCommand{ @@ -115,3 +119,44 @@ func TestCommandFrom_AfterPullNotExisting(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, "FROM: Failed to inspect image after pull: not-existing", err.Error()) } + +// =========== Testing RUN =========== + +func TestCommandRun_Simple(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandRun{ConfigCommand{ + args: []string{"whoami"}, + }} + + origCmd := []string{"/bin/program"} + b.state.container.Cmd = origCmd + b.state.imageID = "123" + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.container.Cmd) + }).Once() + + c.On("RunContainer", "456", false).Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, origCmd, b.state.container.Cmd) + assert.Equal(t, "123", state.imageID) + assert.Equal(t, "456", state.containerID) + assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, state.container.Cmd) + + // testing cleanup + assert.NotNil(t, state.postCommit, "expected state.postCommit function to be set") + + state2, err := state.postCommit(state) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, origCmd, state2.container.Cmd) +} diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go index 80d0d1e4..2b56a389 100644 --- a/src/rocker/build2/rockerfile.go +++ b/src/rocker/build2/rockerfile.go @@ -103,6 +103,19 @@ func (r *Rockerfile) Commands() []ConfigCommand { return commands } +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} + func parseFlags(flags []string) map[string]string { result := make(map[string]string) for _, flag := range flags { From 4c5540d6bdcc2cc5e140321419a887c3a2d0c66f Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 19:58:00 +0300 Subject: [PATCH 014/131] commit, run impl --- src/cmd/rocker/main.go | 2 + src/rocker/build2/build.go | 5 + src/rocker/build2/build_test.go | 10 ++ src/rocker/build2/client.go | 161 ++++++++++++++++++++++- src/rocker/build2/commands.go | 37 ++++-- src/rocker/build2/commands_test.go | 25 +++- src/rocker/build2/container_formatter.go | 49 +++++++ 7 files changed, 272 insertions(+), 17 
deletions(-) create mode 100644 src/rocker/build2/container_formatter.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index fc9f2fd7..cc870c2c 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -272,6 +272,8 @@ func buildCommand(c *cli.Context) { log.Fatal(err) } + log.Infof("Successfully built %.12s", builder.GetImageID()) + // builder := build.Builder{ // Rockerfile: configFilename, // ContextDir: contextDir, diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 0553ac47..4889753b 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -40,6 +40,7 @@ type State struct { container docker.Config imageID string containerID string + commitMsg []string postCommit func(s State) (s1 State, err error) } @@ -77,3 +78,7 @@ func (b *Build) Run(plan Plan) (err error) { func (b *Build) GetState() State { return b.state } + +func (b *Build) GetImageID() string { + return b.state.imageID +} diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 4620b193..069076b5 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -72,3 +72,13 @@ func (m *MockClient) RunContainer(containerID string, attach bool) error { args := m.Called(containerID, attach) return args.Error(0) } + +func (m *MockClient) CommitContainer(state State, message string) (string, error) { + args := m.Called(state, message) + return args.String(0), args.Error(1) +} + +func (m *MockClient) RemoveContainer(containerID string) error { + args := m.Called(containerID) + return args.Error(0) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 3ac7cfa0..9eb66d18 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -19,6 +19,8 @@ package build2 import ( "fmt" "io" + "os" + "os/signal" "rocker/imagename" "github.com/docker/docker/pkg/jsonmessage" @@ -33,6 +35,8 @@ type Client interface { PullImage(name string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attach bool) error + CommitContainer(state State, message string) (imageID string, err error) + RemoveContainer(containerID string) error } type DockerClient struct { @@ -101,6 +105,7 @@ func (c *DockerClient) PullImage(name string) error { } func (c *DockerClient) CreateContainer(state State) (string, error) { + // TODO: mount volumes // volumesFrom := builder.getMountContainerIds() // binds := builder.getBinds() @@ -121,11 +126,161 @@ func (c *DockerClient) CreateContainer(state State) (string, error) { return "", err } - log.Infof(" ---> Created container %.12s (image id = %.12s)", container.ID, state.imageID) + log.Infof(" | Created container %.12s (image %.12s)", container.ID, state.imageID) return container.ID, nil } -func (c *DockerClient) RunContainer(containerID string, attach bool) error { - return fmt.Errorf("RunContainer not implemented yet") +func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error { + + var ( + success = make(chan struct{}) + def = log.StandardLogger() + + // Wrap output streams with logger + outLogger = &log.Logger{ + Out: def.Out, + Formatter: NewContainerFormatter(containerID, log.InfoLevel), + Level: def.Level, + } + errLogger = &log.Logger{ + Out: def.Out, + Formatter: NewContainerFormatter(containerID, log.ErrorLevel), + Level: def.Level, + } + ) + + attachOpts := docker.AttachToContainerOptions{ + Container: containerID, + OutputStream: outLogger.Writer(), + ErrorStream: errLogger.Writer(), + Stdout: true, + 
Stderr:       true,
+		Stream:       true,
+		Success:      success,
+	}
+
+	// TODO: will implement attach later
+	// if attachStdin {
+	// 	if !builder.isTerminalIn {
+	// 		return fmt.Errorf("Cannot attach to a container on non tty input")
+	// 	}
+	// 	oldState, err := term.SetRawTerminal(builder.fdIn)
+	// 	if err != nil {
+	// 		return err
+	// 	}
+	// 	defer term.RestoreTerminal(builder.fdIn, oldState)
+
+	// 	attachOpts.InputStream = readerVoidCloser{builder.InStream}
+	// 	attachOpts.OutputStream = builder.OutStream
+	// 	attachOpts.ErrorStream = builder.OutStream
+	// 	attachOpts.Stdin = true
+	// 	attachOpts.RawTerminal = true
+	// }
+
+	finished := make(chan struct{}, 1)
+
+	go func() {
+		if err := c.client.AttachToContainer(attachOpts); err != nil {
+			select {
+			case <-finished:
+				// Ignore any attach errors when we have finished already.
+				// It may happen if we attach stdin, the container exits, but then more input arrives on stdin.
+				// This is the case when multiple ATTACH commands are used in a single Rockerfile.
+				// The problem though is that we cannot close stdin, to have it available for the subsequent ATTACH;
+				// therefore, the hijack goroutine from the previous ATTACH will hang until input is received and only then
+				// fire an error.
+				// It's ok for `rocker` since it is not a daemon, but rather a one-off command.
+				//
+				// Also, there is still a problem that `rocker` loses the second character from Stdin in a second ATTACH.
+				// But let's consider it a corner case.
+			default:
+				// Print the error. We cannot return it because the main routine is hanging on WaitContainer
+				log.Errorf("Got error while attaching to container %.12s: %s", containerID, err)
+			}
+		}
+	}()
+
+	success <- <-success
+
+	if err := c.client.StartContainer(containerID, &docker.HostConfig{}); err != nil {
+		return err
+	}
+
+	// if attachStdin {
+	// 	if err := builder.monitorTtySize(containerID); err != nil {
+	// 		return fmt.Errorf("Failed to monitor TTY size for container %.12s, error: %s", containerID, err)
+	// 	}
+	// }
+
+	// TODO: move signal handling to the builder?
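Stepping back from `RunContainer` itself: taken together, the `Client` methods introduced in this patch give every build step a create, run, commit, remove cycle. The sketch below only illustrates how they compose; `runAndCommit` is not a helper in the series (the actual sequencing is split between `CommandRun` and `CommandCommit`).

    // Illustrative composition of the Client methods added in this patch.
    func runAndCommit(c Client, s State, message string) (State, error) {
        id, err := c.CreateContainer(s)
        if err != nil {
            return s, err
        }
        if err := c.RunContainer(id, false); err != nil {
            return s, err
        }
        s.containerID = id
        if s.imageID, err = c.CommitContainer(s, message); err != nil {
            return s, err
        }
        if err := c.RemoveContainer(id); err != nil {
            return s, err
        }
        s.containerID = ""
        return s, nil
    }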
+ + sigch := make(chan os.Signal, 1) + signal.Notify(sigch, os.Interrupt) + + errch := make(chan error) + + go func() { + statusCode, err := c.client.WaitContainer(containerID) + if err != nil { + errch <- err + } else if statusCode != 0 { + // Remove errored container + // TODO: make option to keep them + if err := c.RemoveContainer(containerID); err != nil { + log.Error(err) + } + + errch <- fmt.Errorf("Failed to run container, exit with code %d", statusCode) + } + errch <- nil + return + }() + + select { + case err := <-errch: + // indicate 'finished' so the `attach` goroutine will not give any errors + finished <- struct{}{} + if err != nil { + return err + } + case <-sigch: + log.Infof("Received SIGINT, remove current container...") + if err := c.RemoveContainer(containerID); err != nil { + log.Errorf("Failed to remove container: %s", err) + } + // TODO: send signal to builder.Run() and have a proper cleanup + os.Exit(2) + } + + return nil +} + +func (c *DockerClient) CommitContainer(state State, message string) (string, error) { + commitOpts := docker.CommitContainerOptions{ + Container: state.containerID, + Message: message, + Run: &state.container, + } + + image, err := c.client.CommitContainer(commitOpts) + if err != nil { + return "", err + } + + log.Infof(" | Result image is %.12s", image.ID) + + return image.ID, nil +} + +func (c *DockerClient) RemoveContainer(containerID string) error { + log.Infof(" | Removing container %.12s", containerID) + + opts := docker.RemoveContainerOptions{ + ID: containerID, + Force: true, + RemoveVolumes: true, + } + + return c.client.RemoveContainer(opts) } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index f545c5a3..09469dda 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -18,6 +18,7 @@ package build2 import ( "fmt" + "strings" "github.com/fsouza/go-dockerclient" ) @@ -122,11 +123,32 @@ func (c *CommandReset) Execute(b *Build) (State, error) { type CommandCommit struct{} func (c *CommandCommit) String() string { - return "Committing changes" + return "Commit layers" } -func (c *CommandCommit) Execute(b *Build) (State, error) { - return b.state, nil +func (c *CommandCommit) Execute(b *Build) (s State, err error) { + s = b.state + + if s.containerID == "" { + return s, fmt.Errorf("TODO: committing on empty container not implemented yet") + } + + message := strings.Join(s.commitMsg, ";") + + if s.imageID, err = b.client.CommitContainer(s, message); err != nil { + return s, err + } + + // Reset collected commit messages after the commit + s.commitMsg = []string{} + + if err = b.client.RemoveContainer(s.containerID); err != nil { + return s, err + } + + s.containerID = "" + + return s, nil } // CommandRun implements RUN @@ -157,12 +179,6 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { origCmd := s.container.Cmd s.container.Cmd = cmd - // Restore command after commit - s.postCommit = func(s State) (State, error) { - s.container.Cmd = origCmd - return s, nil - } - if s.containerID, err = b.client.CreateContainer(s); err != nil { return s, err } @@ -171,6 +187,9 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { return s, err } + // Restore command after commit + s.container.Cmd = origCmd + return s, nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 4710ff24..747f476b 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -146,17 +146,32 @@ func TestCommandRun_Simple(t 
*testing.T) { c.AssertExpectations(t) assert.Equal(t, origCmd, b.state.container.Cmd) + assert.Equal(t, origCmd, state.container.Cmd) assert.Equal(t, "123", state.imageID) assert.Equal(t, "456", state.containerID) - assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, state.container.Cmd) +} + +// =========== Testing COMMIT =========== + +func TestCommandCommit_Simple(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandCommit{} - // testing cleanup - assert.NotNil(t, state.postCommit, "expected state.postCommit function to be set") + origCommitMsg := []string{"a", "b"} + b.state.containerID = "456" + b.state.commitMsg = []string{"a", "b"} - state2, err := state.postCommit(state) + c.On("CommitContainer", mock.AnythingOfType("State"), "a;b").Return("789", nil).Once() + c.On("RemoveContainer", "456").Return(nil).Once() + + state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } - assert.Equal(t, origCmd, state2.container.Cmd) + c.AssertExpectations(t) + assert.Equal(t, origCommitMsg, b.state.commitMsg) + assert.Equal(t, []string{}, state.commitMsg) + assert.Equal(t, "789", state.imageID) + assert.Equal(t, "", state.containerID) } diff --git a/src/rocker/build2/container_formatter.go b/src/rocker/build2/container_formatter.go new file mode 100644 index 00000000..9509416e --- /dev/null +++ b/src/rocker/build2/container_formatter.go @@ -0,0 +1,49 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "fmt" + + log "github.com/Sirupsen/logrus" +) + +type formatter struct { + containerID string + level log.Level + delegate log.Formatter +} + +// NewContainerFormatter returns an object that is given to logrus to better format +// contaienr output +func NewContainerFormatter(containerID string, level log.Level) log.Formatter { + return &formatter{ + containerID: containerID, + level: level, + delegate: log.StandardLogger().Formatter, + } +} + +// Format formats a message from container +func (f *formatter) Format(entry *log.Entry) ([]byte, error) { + e := entry.WithFields(log.Fields{ + "container": fmt.Sprintf("%.12s", f.containerID), + }) + e.Message = entry.Message + e.Level = f.level + return f.delegate.Format(e) +} From 12613d6f97bd7615b5f7b71d1a45a6bac7f166a8 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 20:40:37 +0300 Subject: [PATCH 015/131] ENV, CMD --- src/rocker/build2/client.go | 5 ++ src/rocker/build2/commands.go | 96 +++++++++++++++++++++++--- src/rocker/build2/commands_test.go | 105 ++++++++++++++++++++++++++++- 3 files changed, 196 insertions(+), 10 deletions(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 9eb66d18..67a97a33 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -26,6 +26,7 @@ import ( "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/term" "github.com/fsouza/go-dockerclient" + "github.com/kr/pretty" log "github.com/Sirupsen/logrus" ) @@ -121,6 +122,8 @@ func (c *DockerClient) CreateContainer(state State) (string, error) { }, } + log.Debugf("Create container: %# v", pretty.Formatter(opts)) + container, err := c.client.CreateContainer(opts) if err != nil { return "", err @@ -263,6 +266,8 @@ func (c *DockerClient) CommitContainer(state State, message string) (string, err Run: &state.container, } + log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts)) + image, err := c.client.CommitContainer(commitOpts) if err != nil { return "", err diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 09469dda..d1d0cba9 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -55,6 +55,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandTag{cfg}, nil case "copy": return &CommandCopy{cfg}, nil + case "cmd": + return &CommandCmd{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -129,19 +131,30 @@ func (c *CommandCommit) String() string { func (c *CommandCommit) Execute(b *Build) (s State, err error) { s = b.state + message := strings.Join(s.commitMsg, "; ") + + // Reset collected commit messages after the commit + s.commitMsg = []string{} + if s.containerID == "" { - return s, fmt.Errorf("TODO: committing on empty container not implemented yet") - } + if message == "" { + return s, fmt.Errorf("Nothing to commit, this might be a bug.") + } - message := strings.Join(s.commitMsg, ";") + origCmd := s.container.Cmd + s.container.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + + if s.containerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + s.container.Cmd = origCmd + } if s.imageID, err = b.client.CommitContainer(s, message); err != nil { return s, err } - // Reset collected commit messages after the commit - s.commitMsg = []string{} - if err = b.client.RemoveContainer(s.containerID); err != nil { return s, err } @@ -202,8 +215,75 @@ func (c *CommandEnv) String() string { return c.cfg.original } -func (c 
*CommandEnv) Execute(b *Build) (State, error) { - return b.state, nil +func (c *CommandEnv) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("ENV requires at least one argument") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to ENV, too many args") + } + + commitStr := "ENV" + + for j := 0; j < len(args); j += 2 { + // name ==> args[j] + // value ==> args[j+1] + newVar := strings.Join(args[j:j+2], "=") + commitStr += " " + newVar + + gotOne := false + for i, envVar := range s.container.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + s.container.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + s.container.Env = append(s.container.Env, newVar) + } + } + + s.commitMsg = append(s.commitMsg, commitStr) + + return s, nil +} + +// CommandCmd implements CMD +type CommandCmd struct { + cfg ConfigCommand +} + +func (c *CommandCmd) String() string { + return c.cfg.original +} + +func (c *CommandCmd) Execute(b *Build) (s State, err error) { + s = b.state + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) + } + + s.container.Cmd = cmd + + s.commitMsg = append(s.commitMsg, fmt.Sprintf("CMD %q", cmd)) + + // TODO: unsetting CMD? + // if len(args) != 0 { + // b.cmdSet = true + // } + + return s, nil } // CommandTag implements TAG diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 747f476b..92330219 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -159,9 +159,9 @@ func TestCommandCommit_Simple(t *testing.T) { origCommitMsg := []string{"a", "b"} b.state.containerID = "456" - b.state.commitMsg = []string{"a", "b"} + b.state.commitMsg = origCommitMsg - c.On("CommitContainer", mock.AnythingOfType("State"), "a;b").Return("789", nil).Once() + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() c.On("RemoveContainer", "456").Return(nil).Once() state, err := cmd.Execute(b) @@ -172,6 +172,107 @@ func TestCommandCommit_Simple(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, origCommitMsg, b.state.commitMsg) assert.Equal(t, []string{}, state.commitMsg) + assert.Equal(t, []string(nil), state.container.Cmd) assert.Equal(t, "789", state.imageID) assert.Equal(t, "", state.containerID) } + +func TestCommandCommit_NoContainer(t *testing.T) { + b, c := makeBuild(t, "", BuildConfig{}) + cmd := &CommandCommit{} + + origCommitMsg := []string{"a", "b"} + b.state.commitMsg = origCommitMsg + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.container.Cmd) + }).Once() + + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() + c.On("RemoveContainer", "456").Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, origCommitMsg, b.state.commitMsg) + assert.Equal(t, []string{}, state.commitMsg) + assert.Equal(t, "789", state.imageID) + assert.Equal(t, "", state.containerID) +} + +func TestCommandCommit_NoCommitMsgs(t *testing.T) { + b, _ := makeBuild(t, "", BuildConfig{}) + cmd := &CommandCommit{} + + _, err := cmd.Execute(b) + assert.Contains(t, err.Error(), "Nothing to commit") +} + 
+// =========== Testing ENV =========== + +func TestCommandEnv_Simple(t *testing.T) { + b, _ := makeBuild(t, "", BuildConfig{}) + cmd := &CommandEnv{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) + assert.Equal(t, []string{"type=web", "env=prod"}, state.container.Env) +} + +func TestCommandEnv_Advanced(t *testing.T) { + b, _ := makeBuild(t, "", BuildConfig{}) + cmd := &CommandEnv{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + b.state.container.Env = []string{"env=dev", "version=1.2.3"} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) + assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.container.Env) +} + +// =========== Testing CMD =========== + +func TestCommandCmd_Simple(t *testing.T) { + b, _ := makeBuild(t, "", BuildConfig{}) + cmd := &CommandCmd{ConfigCommand{ + args: []string{"apt-get", "install"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.container.Cmd) +} + +func TestCommandCmd_Json(t *testing.T) { + b, _ := makeBuild(t, "", BuildConfig{}) + cmd := &CommandCmd{ConfigCommand{ + args: []string{"apt-get", "install"}, + attrs: map[string]bool{"json": true}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"apt-get", "install"}, state.container.Cmd) +} From 2b0ef0a44344dbea0b22abb981e041deba052773 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 20 Sep 2015 20:45:55 +0300 Subject: [PATCH 016/131] refactor variables and struct names --- src/cmd/rocker/main.go | 2 +- src/rocker/build2/build.go | 8 ++--- src/rocker/build2/build_test.go | 4 +-- src/rocker/build2/client.go | 14 ++++----- src/rocker/build2/commands.go | 22 ++++++------- src/rocker/build2/commands_test.go | 50 +++++++++++++++--------------- src/rocker/build2/plan_test.go | 2 +- 7 files changed, 51 insertions(+), 51 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index cc870c2c..7085999a 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -256,7 +256,7 @@ func buildCommand(c *cli.Context) { client := build2.NewDockerClient(dockerClient, auth) - builder := build2.New(client, rockerfile, build2.BuildConfig{ + builder := build2.New(client, rockerfile, build2.Config{ InStream: os.Stdin, OutStream: os.Stdout, ContextDir: contextDir, diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 4889753b..d508bb09 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -29,7 +29,7 @@ var ( NoBaseImageSpecifier = "scratch" ) -type BuildConfig struct { +type Config struct { OutStream io.Writer InStream io.ReadCloser ContextDir string @@ -37,7 +37,7 @@ type BuildConfig struct { } type State struct { - container docker.Config + config docker.Config imageID string containerID string commitMsg []string @@ -46,12 +46,12 @@ type State struct { type Build struct { rockerfile *Rockerfile - cfg BuildConfig + cfg Config client Client state State } -func New(client Client, rockerfile *Rockerfile, cfg BuildConfig) *Build { +func New(client Client, rockerfile *Rockerfile, cfg Config) *Build { return &Build{ rockerfile: rockerfile, cfg: cfg, diff --git a/src/rocker/build2/build_test.go 
b/src/rocker/build2/build_test.go index 069076b5..106b3073 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -28,13 +28,13 @@ import ( ) func TestNewBuild(t *testing.T) { - b, _ := makeBuild(t, "FROM ubuntu", BuildConfig{}) + b, _ := makeBuild(t, "FROM ubuntu", Config{}) assert.IsType(t, &Rockerfile{}, b.rockerfile) } // internal helpers -func makeBuild(t *testing.T, rockerfileContent string, cfg BuildConfig) (*Build, *MockClient) { +func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *MockClient) { pc, _, _, _ := runtime.Caller(1) fn := runtime.FuncForPC(pc) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 67a97a33..18553166 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -105,17 +105,17 @@ func (c *DockerClient) PullImage(name string) error { return nil } -func (c *DockerClient) CreateContainer(state State) (string, error) { +func (c *DockerClient) CreateContainer(s State) (string, error) { // TODO: mount volumes // volumesFrom := builder.getMountContainerIds() // binds := builder.getBinds() - state.container.Image = state.imageID + s.config.Image = s.imageID // TODO: assign human readable name? opts := docker.CreateContainerOptions{ - Config: &state.container, + Config: &s.config, HostConfig: &docker.HostConfig{ // Binds: binds, // VolumesFrom: volumesFrom, @@ -129,7 +129,7 @@ func (c *DockerClient) CreateContainer(state State) (string, error) { return "", err } - log.Infof(" | Created container %.12s (image %.12s)", container.ID, state.imageID) + log.Infof(" | Created container %.12s (image %.12s)", container.ID, s.imageID) return container.ID, nil } @@ -259,11 +259,11 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error return nil } -func (c *DockerClient) CommitContainer(state State, message string) (string, error) { +func (c *DockerClient) CommitContainer(s State, message string) (string, error) { commitOpts := docker.CommitContainerOptions{ - Container: state.containerID, + Container: s.containerID, Message: message, - Run: &state.container, + Run: &s.config, } log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts)) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index d1d0cba9..84b627ef 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -103,7 +103,7 @@ func (c *CommandFrom) Execute(b *Build) (state State, err error) { state = b.state state.imageID = img.ID - state.container = *img.Config + state.config = *img.Config return state, nil } @@ -141,14 +141,14 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { return s, fmt.Errorf("Nothing to commit, this might be a bug.") } - origCmd := s.container.Cmd - s.container.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + origCmd := s.config.Cmd + s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} if s.containerID, err = b.client.CreateContainer(s); err != nil { return s, err } - s.container.Cmd = origCmd + s.config.Cmd = origCmd } if s.imageID, err = b.client.CommitContainer(s, message); err != nil { @@ -189,8 +189,8 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { // TODO: test with ENTRYPOINT // We run this command in the container using CMD - origCmd := s.container.Cmd - s.container.Cmd = cmd + origCmd := s.config.Cmd + s.config.Cmd = cmd if s.containerID, err = b.client.CreateContainer(s); err != nil { return s, err @@ -201,7 +201,7 @@ func (c *CommandRun) Execute(b *Build) 
(s State, err error) { } // Restore command after commit - s.container.Cmd = origCmd + s.config.Cmd = origCmd return s, nil } @@ -238,16 +238,16 @@ func (c *CommandEnv) Execute(b *Build) (s State, err error) { commitStr += " " + newVar gotOne := false - for i, envVar := range s.container.Env { + for i, envVar := range s.config.Env { envParts := strings.SplitN(envVar, "=", 2) if envParts[0] == args[j] { - s.container.Env[i] = newVar + s.config.Env[i] = newVar gotOne = true break } } if !gotOne { - s.container.Env = append(s.container.Env, newVar) + s.config.Env = append(s.config.Env, newVar) } } @@ -274,7 +274,7 @@ func (c *CommandCmd) Execute(b *Build) (s State, err error) { cmd = append([]string{"/bin/sh", "-c"}, cmd...) } - s.container.Cmd = cmd + s.config.Cmd = cmd s.commitMsg = append(s.commitMsg, fmt.Sprintf("CMD %q", cmd)) diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 92330219..59a00f38 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -28,7 +28,7 @@ import ( // =========== Testing FROM =========== func TestCommandFrom_Existing(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandFrom{ConfigCommand{ args: []string{"existing"}, }} @@ -49,11 +49,11 @@ func TestCommandFrom_Existing(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, "123", state.imageID) - assert.Equal(t, "localhost", state.container.Hostname) + assert.Equal(t, "localhost", state.config.Hostname) } func TestCommandFrom_PullExisting(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{Pull: true}) + b, c := makeBuild(t, "", Config{Pull: true}) cmd := &CommandFrom{ConfigCommand{ args: []string{"existing"}, }} @@ -75,11 +75,11 @@ func TestCommandFrom_PullExisting(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, "123", state.imageID) - assert.Equal(t, "localhost", state.container.Hostname) + assert.Equal(t, "localhost", state.config.Hostname) } func TestCommandFrom_NotExisting(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandFrom{ConfigCommand{ args: []string{"not-existing"}, }} @@ -105,7 +105,7 @@ func TestCommandFrom_NotExisting(t *testing.T) { } func TestCommandFrom_AfterPullNotExisting(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandFrom{ConfigCommand{ args: []string{"not-existing"}, }} @@ -123,18 +123,18 @@ func TestCommandFrom_AfterPullNotExisting(t *testing.T) { // =========== Testing RUN =========== func TestCommandRun_Simple(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandRun{ConfigCommand{ args: []string{"whoami"}, }} origCmd := []string{"/bin/program"} - b.state.container.Cmd = origCmd + b.state.config.Cmd = origCmd b.state.imageID = "123" c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) - assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.container.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.config.Cmd) }).Once() c.On("RunContainer", "456", false).Return(nil).Once() @@ -145,8 +145,8 @@ func TestCommandRun_Simple(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCmd, b.state.container.Cmd) - assert.Equal(t, origCmd, state.container.Cmd) + assert.Equal(t, origCmd, b.state.config.Cmd) + assert.Equal(t, origCmd, state.config.Cmd) assert.Equal(t, "123", 
state.imageID) assert.Equal(t, "456", state.containerID) } @@ -154,7 +154,7 @@ func TestCommandRun_Simple(t *testing.T) { // =========== Testing COMMIT =========== func TestCommandCommit_Simple(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} @@ -172,13 +172,13 @@ func TestCommandCommit_Simple(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, origCommitMsg, b.state.commitMsg) assert.Equal(t, []string{}, state.commitMsg) - assert.Equal(t, []string(nil), state.container.Cmd) + assert.Equal(t, []string(nil), state.config.Cmd) assert.Equal(t, "789", state.imageID) assert.Equal(t, "", state.containerID) } func TestCommandCommit_NoContainer(t *testing.T) { - b, c := makeBuild(t, "", BuildConfig{}) + b, c := makeBuild(t, "", Config{}) cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} @@ -186,7 +186,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) - assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.container.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.config.Cmd) }).Once() c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() @@ -205,7 +205,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { } func TestCommandCommit_NoCommitMsgs(t *testing.T) { - b, _ := makeBuild(t, "", BuildConfig{}) + b, _ := makeBuild(t, "", Config{}) cmd := &CommandCommit{} _, err := cmd.Execute(b) @@ -215,7 +215,7 @@ func TestCommandCommit_NoCommitMsgs(t *testing.T) { // =========== Testing ENV =========== func TestCommandEnv_Simple(t *testing.T) { - b, _ := makeBuild(t, "", BuildConfig{}) + b, _ := makeBuild(t, "", Config{}) cmd := &CommandEnv{ConfigCommand{ args: []string{"type", "web", "env", "prod"}, }} @@ -226,16 +226,16 @@ func TestCommandEnv_Simple(t *testing.T) { } assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) - assert.Equal(t, []string{"type=web", "env=prod"}, state.container.Env) + assert.Equal(t, []string{"type=web", "env=prod"}, state.config.Env) } func TestCommandEnv_Advanced(t *testing.T) { - b, _ := makeBuild(t, "", BuildConfig{}) + b, _ := makeBuild(t, "", Config{}) cmd := &CommandEnv{ConfigCommand{ args: []string{"type", "web", "env", "prod"}, }} - b.state.container.Env = []string{"env=dev", "version=1.2.3"} + b.state.config.Env = []string{"env=dev", "version=1.2.3"} state, err := cmd.Execute(b) if err != nil { @@ -243,13 +243,13 @@ func TestCommandEnv_Advanced(t *testing.T) { } assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) - assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.container.Env) + assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.config.Env) } // =========== Testing CMD =========== func TestCommandCmd_Simple(t *testing.T) { - b, _ := makeBuild(t, "", BuildConfig{}) + b, _ := makeBuild(t, "", Config{}) cmd := &CommandCmd{ConfigCommand{ args: []string{"apt-get", "install"}, }} @@ -259,11 +259,11 @@ func TestCommandCmd_Simple(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.container.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.config.Cmd) } func TestCommandCmd_Json(t *testing.T) { - b, _ := makeBuild(t, "", BuildConfig{}) + b, _ := makeBuild(t, "", Config{}) cmd := &CommandCmd{ConfigCommand{ args: 
[]string{"apt-get", "install"}, attrs: map[string]bool{"json": true}, @@ -274,5 +274,5 @@ func TestCommandCmd_Json(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"apt-get", "install"}, state.container.Cmd) + assert.Equal(t, []string{"apt-get", "install"}, state.config.Cmd) } diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build2/plan_test.go index 4b643d25..0290c28d 100644 --- a/src/rocker/build2/plan_test.go +++ b/src/rocker/build2/plan_test.go @@ -277,7 +277,7 @@ COPY rootfs / // internal helpers func makePlan(t *testing.T, rockerfileContent string) Plan { - b, _ := makeBuild(t, rockerfileContent, BuildConfig{}) + b, _ := makeBuild(t, rockerfileContent, Config{}) p, err := NewPlan(b) if err != nil { From 7ad7605d2b01055b26731665ba289773e5152b09 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 21 Sep 2015 13:40:41 +0300 Subject: [PATCH 017/131] vendor docke urlutil, httputils, tarsum --- vendor/manifest | 21 + .../docker/docker/pkg/httputils/httputils.go | 56 ++ .../docker/pkg/httputils/httputils_test.go | 115 ++++ .../docker/docker/pkg/httputils/mimetype.go | 30 + .../docker/pkg/httputils/mimetype_test.go | 13 + .../pkg/httputils/resumablerequestreader.go | 95 +++ .../httputils/resumablerequestreader_test.go | 307 +++++++++ .../docker/pkg/tarsum/builder_context.go | 21 + .../docker/pkg/tarsum/builder_context_test.go | 63 ++ .../docker/docker/pkg/tarsum/fileinfosums.go | 126 ++++ .../docker/pkg/tarsum/fileinfosums_test.go | 62 ++ .../docker/docker/pkg/tarsum/tarsum.go | 294 ++++++++ .../docker/docker/pkg/tarsum/tarsum_spec.md | 230 +++++++ .../docker/docker/pkg/tarsum/tarsum_test.go | 648 ++++++++++++++++++ .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes .../docker/pkg/tarsum/testdata/xattr/json | 1 + .../pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes .../docker/docker/pkg/tarsum/versioning.go | 150 ++++ .../docker/pkg/tarsum/versioning_test.go | 98 +++ .../docker/docker/pkg/tarsum/writercloser.go | 22 + .../docker/docker/pkg/urlutil/urlutil.go | 50 ++ .../docker/docker/pkg/urlutil/urlutil_test.go | 55 ++ 29 files changed, 2459 insertions(+) create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/httputils.go create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go create mode 100644 vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md create mode 100644 
vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/json create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go create mode 100644 vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go create mode 100644 vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go diff --git a/vendor/manifest b/vendor/manifest index 7fdfb83f..33da011c 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -101,6 +101,27 @@ "repository": "https://github.com/stretchr/objx", "revision": "cbeaeb16a013161a98496fad62933b1d21786672", "branch": "master" + }, + { + "importpath": "github.com/docker/docker/pkg/urlutil", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/urlutil" + }, + { + "importpath": "github.com/docker/docker/pkg/httputils", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/httputils" + }, + { + "importpath": "github.com/docker/docker/pkg/tarsum", + "repository": "https://github.com/docker/docker", + "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", + "branch": "master", + "path": "/pkg/tarsum" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 00000000..d7dc4387 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. 
+func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). +func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go new file mode 100644 index 00000000..d35d0821 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": 
{"error"}, + "foo": {"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 00000000..d5cf34e4 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the MIME content type. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. 
+func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go new file mode 100644 index 00000000..9de433ee --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 00000000..bebc8608 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. 
+func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 00000000..e9d05783 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,307 @@ +package httputils + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too much failures, bails out after some wait 
+func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too much failures, returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("A error occured") +} + +// If a an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 
'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 00000000..b42983e9 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) 
+ // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go new file mode 100644 index 00000000..719f7289 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/builder_context_test.go @@ -0,0 +1,63 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + ts.(BuilderContext).Remove("etc/sudoers") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums())) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 00000000..7c2161c2 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through interface +// so the actual name and sum cannot be medled with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. 
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. + for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values if a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 00000000..bb700d8b --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. 
Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 00000000..d2df58c7 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,294 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. +// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. 
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. +func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. 
+ standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// TarSum default is "sha256" +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writter + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git 
a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 00000000..77927ee7 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. 
+
+There are two delimiters used:
+* '+' separates the TarSum version from the hashing cipher
+* ':' separates the calculation mechanics from the expected hash
+
+Example:
+
+```
+"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+|         |       \                                                               |
+|         |        \                                                              |
+|_version_|_cipher__|__                                                           |
+|                      \                                                          |
+|_calculation_mechanics_|______________________expected_sum_______________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in the calculation
+needed and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.` prefixed Pax
+  tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing only, not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation
+mechanic is `sha256`, i.e. the SHA256 hash algorithm as defined in FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher include future-proofing the TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation takes into consideration the lifecycle of
+the tar archive: the tar archive is not an immutable, permanent artifact.
+Otherwise, options like relying on a known hashing cipher checksum of the
+archive itself would be reliable enough. The tar archive of the filesystem is
+used as a transportation medium for Docker images, and the archive is discarded
+once its contents are extracted. Items such as the order of files in the tar
+archive and timestamps are therefore subject to change once an image is
+received, and consistent validation cannot depend on them.
+
+### Process
+
+The method is typically iterative, due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, followed by
+the payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as a reference for special ordering.
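Editor's note: the per-file sum can be pictured with the condensed sketch below. It is illustrative only and assumes it lives inside package tarsum, so it can use the `tarHeaderSelector` interface defined in versioning.go further down in this patch; the real implementation interleaves this work with copying the stream (`tarSum.encodeHeader` and `tarSum.Read` in tarsum.go above). The header fields it hashes are the ones enumerated in the next subsection.

```go
import (
	"archive/tar"
	"encoding/hex"
	"hash"
	"io"
)

// fileSum sketches the per-file checksum: the selected header fields are
// hashed first (as "{key}{value}" pairs with no separator), followed by the
// file body. The hex digest is what gets appended to the list of file sums.
func fileSum(h hash.Hash, hdr *tar.Header, body io.Reader, headers tarHeaderSelector) (string, error) {
	h.Reset()
	// Ordered headers first.
	for _, elem := range headers.selectHeaders(hdr) {
		if _, err := h.Write([]byte(elem[0] + elem[1])); err != nil {
			return "", err
		}
	}
	// Then the file body.
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```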
+
+#### Headers
+
+The following headers are read, in this order (with the corresponding
+representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For Version1 and later, the extended attribute headers ("SCHILY.xattr." prefixed
+pax headers) are included after the above list. These xattr key/value pairs are
+first sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+    "{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, their order of
+occurrence is reflected in the sums of the corresponding file headers and
+bodies.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum above,
+including the TarSum version, the associated hash cipher and the
+hexadecimal-encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending files with
+the same names as prior files in the archive; the latter file will clobber the
+prior file of the same path. Because of this, the algorithm now accounts for
+files with matching paths and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgements
+
+Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work on the
+TarSum calculation.
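Editor's note: putting the spec together, a minimal end-to-end use of the vendored package looks like the sketch below, mirroring what the tarsum_test.go tests added later in this patch do (open a tar stream, drain it through the TarSum reader, then call Sum). The file name is only an example.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	f, err := os.Open("layer.tar") // any uncompressed tar stream
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Second argument true disables gzip compression of the re-emitted
	// stream, so the raw tar bytes are hashed.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		panic(err)
	}

	// The checksum is accumulated while the stream is read, so the whole
	// archive must be consumed before calling Sum.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}

	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:..."
}
```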
+ diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go new file mode 100644 index 00000000..89626660 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -0,0 +1,648 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. 
ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. 
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. + ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. 
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", 
+ Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for _, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. 
+ break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + n, err := io.Copy(buf, fh) + fh.Close() + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 00000000..48e2af34 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ +{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/src/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 00000000..28822868 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 00000000..88e0a578 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct 
{ + Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 00000000..9727ecde --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 00000000..f7094b1f --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,50 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "udp://", "unix://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL. 
+func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go new file mode 100644 index 00000000..bb89d8b5 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -0,0 +1,55 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} From f39e9c69f9224d8c247a76297acde2bfc266e723 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 21 Sep 2015 13:41:14 +0300 Subject: [PATCH 018/131] draft COPY --- src/rocker/build2/build_test.go | 6 + src/rocker/build2/client.go | 13 + src/rocker/build2/commands.go | 5 +- src/rocker/build2/commands_test.go | 30 +++ src/rocker/build2/copy.go | 372 ++++++++++++++++++++++++++++ src/rocker/build2/copy2.go | 154 ++++++++++++ src/rocker/build2/copy2_test.go | 88 +++++++ src/rocker/build2/testdata/file.txt | 1 + 8 files changed, 668 insertions(+), 1 deletion(-) create mode 100644 src/rocker/build2/copy.go create mode 100644 src/rocker/build2/copy2.go create mode 100644 src/rocker/build2/copy2_test.go create mode 100644 src/rocker/build2/testdata/file.txt diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 106b3073..23c5c724 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -17,6 +17,7 @@ package build2 import ( + "io" "rocker/template" "runtime" "strings" @@ -82,3 +83,8 @@ func (m *MockClient) RemoveContainer(containerID string) error { args := m.Called(containerID) return args.Error(0) } + +func (m *MockClient) UploadToContainer(containerID string, stream io.Reader, path string) error { + args := m.Called(containerID, stream, path) + return args.Error(0) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 18553166..1f98e3de 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -38,6 +38,7 @@ type Client interface { RunContainer(containerID string, attach bool) error CommitContainer(state State, message string) (imageID string, err error) 
RemoveContainer(containerID string) error + UploadToContainer(containerID string, stream io.Reader, path string) error } type DockerClient struct { @@ -289,3 +290,15 @@ func (c *DockerClient) RemoveContainer(containerID string) error { return c.client.RemoveContainer(opts) } + +func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, path string) error { + log.Infof(" | Uploading files to container %.12s", containerID) + + opts := docker.UploadToContainerOptions{ + InputStream: stream, + Path: path, + NoOverwriteDirNonDir: false, + } + + return c.client.UploadToContainer(containerID, opts) +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 84b627ef..135fb3d2 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -309,5 +309,8 @@ func (c *CommandCopy) String() string { } func (c *CommandCopy) Execute(b *Build) (State, error) { - return b.state, nil + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("COPY requires at least two arguments") + } + return copyFiles(b, c.cfg.args, "COPY") } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 59a00f38..330695d2 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -19,6 +19,7 @@ package build2 import ( "testing" + "github.com/kr/pretty" "github.com/stretchr/testify/mock" "github.com/fsouza/go-dockerclient" @@ -276,3 +277,32 @@ func TestCommandCmd_Json(t *testing.T) { assert.Equal(t, []string{"apt-get", "install"}, state.config.Cmd) } + +// =========== Testing COPY =========== + +func TestCommandCopy_Simple(t *testing.T) { + // TODO: do we need to check the dest is always a directory? + b, c := makeBuild(t, "", Config{}) + cmd := &CommandCopy{ConfigCommand{ + args: []string{"testdata/file.txt", "/file.txt"}, + }} + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + // TODO: a better check + assert.True(t, len(arg.config.Cmd) > 0) + }).Once() + + c.On("UploadToContainer", "456", mock.AnythingOfType("*io.PipeReader"), "/file.txt").Return(nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + // assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.config.Cmd) + pretty.Println(state) + + c.AssertExpectations(t) + assert.Equal(t, "456", state.containerID) +} diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go new file mode 100644 index 00000000..9574e3aa --- /dev/null +++ b/src/rocker/build2/copy.go @@ -0,0 +1,372 @@ +// NOTICE: it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. 
+ +package build2 + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive" + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system" +) + +type copyInfo struct { + origPath string + destPath string + hash string + decompress bool + tmpDir string +} + +func copyCommand(b *Build, args []string, allowRemote bool, allowDecompression bool, cmdName string) (s State, err error) { + + s = b.state + + if len(args) < 2 { + return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + copyInfos := []*copyInfo{} + + // b.Config.Image = b.image + + defer func() { + for _, ci := range copyInfos { + if ci.tmpDir != "" { + os.RemoveAll(ci.tmpDir) + } + } + }() + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + for _, orig := range args[0 : len(args)-1] { + if err := calcCopyInfo( + b, + cmdName, + ©Infos, + orig, + dest, + allowRemote, + allowDecompression, + true, + ); err != nil { + return s, err + } + } + + if len(copyInfos) == 0 { + return s, fmt.Errorf("No source files were specified") + } + if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one CI then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + // var origPaths string + + if len(copyInfos) == 1 { + srcHash = copyInfos[0].hash + // origPaths = copyInfos[0].origPath + } else { + var hashs []string + var origs []string + for _, ci := range copyInfos { + hashs = append(hashs, ci.hash) + origs = append(origs, ci.origPath) + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + // origPaths = strings.Join(origs, " ") + } + + s.commitMsg = append(s.commitMsg, fmt.Sprintf("%s %s in %s", cmdName, srcHash, dest)) + + // TODO: probe cache + + // TODO: do the actual copy + + // for _, ci := range copyInfos { + // if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { + // return err + // } + // } + + return s, nil +} + +func calcCopyInfo(b *Build, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error { + + // Work in daemon-specific OS filepath semantics. However, we save + // the the origPath passed in here, as it might also be a URL which + // we need to check for in this function. 
+ passedInOrigPath := origPath + origPath = filepath.FromSlash(origPath) + destPath = filepath.FromSlash(destPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Twiddle the destPath when its a relative path - meaning, make it + // relative to the WORKINGDIR + if !filepath.IsAbs(destPath) { + hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) + destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.state.config.WorkingDir), destPath) + + // Make sure we preserve any trailing slash + if hasSlash { + destPath += string(os.PathSeparator) + } + } + + // In the remote/URL case, download it and gen its hashcode + if urlutil.IsURL(passedInOrigPath) { + + // As it's a URL, we go back to processing on what was passed in + // to this function + origPath = passedInOrigPath + + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath // default to this but can change + ci.destPath = destPath + ci.decompress = false + *cInfos = append(*cInfos, &ci) + + // Initiate the download + resp, err := httputils.Download(ci.origPath) + if err != nil { + return err + } + + // Create a tmp dir + tmpDirName, err := ioutil.TempDir(b.cfg.ContextDir, "docker-remote") + if err != nil { + return err + } + ci.tmpDir = tmpDirName + + // Create a tmp file within our tmp dir + tmpFileName := filepath.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + + // Download and dump result to tmp file + // TODO: adopt Docker's progressreader? + if _, err := io.Copy(tmpFile, resp.Body); err != nil { + tmpFile.Close() + return err + } + tmpFile.Close() + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + times := make([]syscall.Timespec, 2) + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + mTime, err := http.ParseTime(lastMod) + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if err == nil { + times[1] = syscall.NsecToTimespec(mTime.UnixNano()) + } + } + + if err := system.UtimesNano(tmpFileName, times); err != nil { + return err + } + + ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + + // If the destination is a directory, figure out the filename. 
+ if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) { + u, err := url.Parse(origPath) + if err != nil { + return err + } + path := u.Path + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + return fmt.Errorf("cannot determine filename from url: %s", u) + } + ci.destPath = ci.destPath + filename + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return err + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return err + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } + ci.hash = tarSum.Sum(nil) + r.Close() + + return nil + } + + // TODO: Deal with wildcards + // if allowWildcards && containsWildcards(origPath) { + // for _, fileInfo := range b.context.GetSums() { + // if fileInfo.Name() == "" { + // continue + // } + // match, _ := filepath.Match(origPath, fileInfo.Name()) + // if !match { + // continue + // } + + // // Note we set allowWildcards to false in case the name has + // // a * in it + // calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false) + // } + // return nil + // } + + // Must be a dir or a file + + if err := checkPathForAddition(b, origPath); err != nil { + return err + } + fi, _ := os.Stat(filepath.Join(b.cfg.ContextDir, origPath)) + + ci := copyInfo{} + ci.origPath = origPath + ci.hash = origPath + ci.destPath = destPath + ci.decompress = allowDecompression + *cInfos = append(*cInfos, &ci) + + // Deal with the single file case + if !fi.IsDir() { + r, err := archive.Tar(ci.origPath, archive.Uncompressed) + if err != nil { + return err + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return err + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + return err + } + ci.hash = "file:" + tarSum.Sum(nil) + r.Close() + + // This will match first file in sums of the archive + // fis := b.context.GetSums().GetFile(ci.origPath) + // if fis != nil { + // ci.hash = "file:" + fis.Sum() + // } + return nil + } + + // TODO: tarsum for dirs + // NewTarWithOptions might do the trick + + // Must be a dir + // var subfiles []string + // absOrigPath := filepath.Join(b.cfg.ContextDir, ci.origPath) + + // // Add a trailing / to make sure we only pick up nested files under + // // the dir and not sibling files of the dir that just happen to + // // start with the same chars + // if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) { + // absOrigPath += string(os.PathSeparator) + // } + + // // Need path w/o slash too to find matching dir w/o trailing slash + // absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] + + // for _, fileInfo := range b.context.GetSums() { + // absFile := filepath.Join(b.contextPath, fileInfo.Name()) + // // Any file in the context that starts with the given path will be + // // picked up and its hashcode used. However, we'll exclude the + // // root dir itself. We do this for a coupel of reasons: + // // 1 - ADD/COPY will not copy the dir itself, just its children + // // so there's no reason to include it in the hash calc + // // 2 - the metadata on the dir will change when any child file + // // changes. This will lead to a miss in the cache check if that + // // child file is in the .dockerignore list. 
+ // if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash { + // subfiles = append(subfiles, fileInfo.Sum()) + // } + // } + // sort.Strings(subfiles) + // hasher := sha256.New() + // hasher.Write([]byte(strings.Join(subfiles, ","))) + // ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) + + return nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func checkPathForAddition(b *Build, orig string) error { + origPath := filepath.Join(b.cfg.ContextDir, orig) + origPath, err := filepath.EvalSymlinks(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + if _, err := os.Stat(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} diff --git a/src/rocker/build2/copy2.go b/src/rocker/build2/copy2.go new file mode 100644 index 00000000..f174891f --- /dev/null +++ b/src/rocker/build2/copy2.go @@ -0,0 +1,154 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/tarsum" + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive" + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils" +) + +func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { + + s = b.state + + if len(args) < 2 { + return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // TODO: do we need to check the dest is always a directory? 
+ + var ( + tar io.ReadCloser + tarSum tarsum.TarSum + src = args[0 : len(args)-1] + dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + // TODO: read .dockerignore + excludes = []string{} + ) + + if tar, err = makeTarStream(b.cfg.ContextDir, src, excludes); err != nil { + return s, err + } + + if tarSum, err = tarsum.NewTarSum(tar, true, tarsum.Version1); err != nil { + return s, err + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return s, err + } + tar.Close() + + message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest) + s.commitMsg = append(s.commitMsg, message) + + origCmd := s.config.Cmd + s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + + if s.containerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + s.config.Cmd = origCmd + + // We need to make a new tar stream, because the previous one has been + // read by the tarsum; maybe, optimize this in future + if tar, err = makeTarStream(b.cfg.ContextDir, src, excludes); err != nil { + return s, err + } + defer tar.Close() + + if err = b.client.UploadToContainer(s.containerID, tar, dest); err != nil { + return s, err + } + + return s, nil +} + +func makeTarStream(srcPath string, includes, excludes []string) (tar io.ReadCloser, err error) { + + if includes, err = expandIncludes(srcPath, includes, excludes); err != nil { + return nil, err + } + + tarOpts := &archive.TarOptions{ + IncludeFiles: includes, + ExcludePatterns: excludes, + Compression: archive.Uncompressed, + NoLchown: true, + } + + return archive.TarWithOptions(srcPath, tarOpts) +} + +func expandIncludes(srcPath string, includes, excludes []string) (result []string, err error) { + result = []string{} + + for _, filePath := range includes { + + matches, err := filepath.Glob(filepath.Join(srcPath, filePath)) + if err != nil { + return result, err + } + + for _, match := range matches { + + relFilePath, err := filepath.Rel(srcPath, match) + if err != nil { + return result, err + } + + skip, err := fileutils.Matches(relFilePath, excludes) + if err != nil { + return result, err + } + if skip { + continue + } + + f, err := os.Stat(match) + if err != nil { + return result, err + } + + // skip checking if symlinks point to non-existing file + // also skip named pipes, because they hanging on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + continue + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return result, fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + + result = append(result, relFilePath) + } + } + + return result, nil +} diff --git a/src/rocker/build2/copy2_test.go b/src/rocker/build2/copy2_test.go new file mode 100644 index 00000000..c4c36cc8 --- /dev/null +++ b/src/rocker/build2/copy2_test.go @@ -0,0 +1,88 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/tarsum" + "github.com/kr/pretty" +) + +func TestMakeTarStream_Basic(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + includes := []string{ + "testdata/*.txt", + } + excludes := []string{ + "testdata/*.tar", + "testdata/*.txt2", + } + + stream, err := makeTarStream(wd, includes, excludes) + if err != nil { + t.Fatal(err) + } + data, err := ioutil.ReadAll(stream) + if err != nil { + t.Fatal(err) + } + + tarSum, err := tarsum.NewTarSum(bytes.NewReader(data), true, tarsum.Version1) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + t.Fatal(err) + } + println("tarsum:" + tarSum.Sum(nil)) + + if err := ioutil.WriteFile("testdata/file.tar", data, 0644); err != nil { + t.Fatal(err) + } + + println("Written to testdata/file.tar") + +} + +func TestExpandIncludes_Basic(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + includes := []string{ + "testdata", + } + excludes := []string{ + "testdata/*.tar", + "testdata/*.txt2", + } + + matches, err := expandIncludes(wd, includes, excludes) + if err != nil { + t.Fatal(err) + } + + pretty.Println(matches) +} diff --git a/src/rocker/build2/testdata/file.txt b/src/rocker/build2/testdata/file.txt new file mode 100644 index 00000000..ce013625 --- /dev/null +++ b/src/rocker/build2/testdata/file.txt @@ -0,0 +1 @@ +hello From 0cbad44ae014d322df34eec35a9b1c515ec7f23d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 21 Sep 2015 13:41:49 +0300 Subject: [PATCH 019/131] ignore some testing stuff --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 53417165..0a20ae6c 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,6 @@ dev .idea *.sublime-workspace + +src/rocker/build2/testdata/file.tar +src/rocker/build2/testdata/file.txt2 From a818743aa897492278f573b700cc9e8c47ee789b Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 22 Sep 2015 06:19:09 +0300 Subject: [PATCH 020/131] implement -no-garbage option to remove images from the tail --- src/cmd/rocker/main.go | 5 +++++ src/rocker/build2/build.go | 9 +++++++++ src/rocker/build2/build_test.go | 5 +++++ src/rocker/build2/client.go | 11 +++++++++++ 4 files changed, 30 insertions(+) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 7085999a..adf4111b 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -131,6 +131,10 @@ func main() { Name: "artifacts-path", Usage: "put artifacts (files with pushed images description) to the directory", }, + cli.BoolFlag{ + Name: "no-garbage", + Usage: "remove the images from the tail if not tagged", + }, } app.Commands = []cli.Command{ @@ -261,6 +265,7 @@ func buildCommand(c *cli.Context) { OutStream: os.Stdout, ContextDir: contextDir, Pull: c.Bool("pull"), + NoGarbage: c.Bool("no-garbage"), }) plan, err := build2.NewPlan(builder) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index d508bb09..1136aa6b 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -34,6 +34,7 @@ type Config struct { InStream io.ReadCloser ContextDir string Pull bool + NoGarbage bool } type State struct { @@ -61,6 +62,7 @@ func New(client Client, rockerfile *Rockerfile, cfg Config) *Build { } func (b *Build) Run(plan Plan) (err error) { + for k, c := range plan { log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) @@ -72,6 +74,13 @@ func (b *Build) Run(plan Plan) 
(err error) { log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state)) } + + if b.cfg.NoGarbage && b.state.imageID != "" { + if err := b.client.RemoveImage(b.state.imageID); err != nil { + return err + } + } + return nil } diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 23c5c724..444d3031 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -64,6 +64,11 @@ func (m *MockClient) PullImage(name string) error { return args.Error(0) } +func (m *MockClient) RemoveImage(imageID string) error { + args := m.Called(imageID) + return args.Error(0) +} + func (m *MockClient) CreateContainer(state State) (string, error) { args := m.Called(state) return args.String(0), args.Error(1) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 1f98e3de..3e691493 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -34,6 +34,7 @@ import ( type Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error + RemoveImage(imageID string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attach bool) error CommitContainer(state State, message string) (imageID string, err error) @@ -106,6 +107,16 @@ func (c *DockerClient) PullImage(name string) error { return nil } +func (c *DockerClient) RemoveImage(imageID string) error { + log.Infof(" | Remove image %.12s", imageID) + + opts := docker.RemoveImageOptions{ + Force: true, + NoPrune: false, + } + return c.client.RemoveImageExtended(imageID, opts) +} + func (c *DockerClient) CreateContainer(s State) (string, error) { // TODO: mount volumes // volumesFrom := builder.getMountContainerIds() From c97b20b514cfd4b1bfecd9182e51dfcb3ac842d3 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 22 Sep 2015 06:19:34 +0300 Subject: [PATCH 021/131] client report size of images when committing --- src/rocker/build2/client.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 3e691493..b44ae17a 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -23,6 +23,8 @@ import ( "os/signal" "rocker/imagename" + "github.com/docker/docker/pkg/units" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/term" "github.com/fsouza/go-dockerclient" @@ -285,7 +287,21 @@ func (c *DockerClient) CommitContainer(s State, message string) (string, error) return "", err } - log.Infof(" | Result image is %.12s", image.ID) + // Inspect the image to get the real size + log.Debugf("Inspect image %s", image.ID) + + if image, err = c.client.InspectImage(image.ID); err != nil { + return "", err + } + + size := fmt.Sprintf("%s (+%s)", + units.HumanSize(float64(image.VirtualSize)), + units.HumanSize(float64(image.Size)), + ) + + log.WithFields(log.Fields{ + "size": size, + }).Infof(" | Result image is %.12s", image.ID) return image.ID, nil } From 66eaad91ac014e4006da5d7c9b8a2ac0a2666e73 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 22 Sep 2015 06:46:05 +0300 Subject: [PATCH 022/131] move -no-garbage impl to cleanup cmd, change plan reset -> cleanup plan: make cleanup at the end --- src/rocker/build2/build.go | 6 ---- src/rocker/build2/commands.go | 28 +++++++++++---- src/rocker/build2/plan.go | 14 +++++++- src/rocker/build2/plan_test.go | 64 +++++++++++++++++++++++++++++++--- 4 files changed, 94 insertions(+), 18 deletions(-) diff --git 
a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 1136aa6b..ed85bd3f 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -75,12 +75,6 @@ func (b *Build) Run(plan Plan) (err error) { log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state)) } - if b.cfg.NoGarbage && b.state.imageID != "" { - if err := b.client.RemoveImage(b.state.imageID); err != nil { - return err - } - } - return nil } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 135fb3d2..7391fd03 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -109,16 +109,30 @@ func (c *CommandFrom) Execute(b *Build) (state State, err error) { } // CommandReset cleans the builder state before the next FROM -type CommandReset struct{} +type CommandCleanup struct { + final bool + tagged bool +} -func (c *CommandReset) String() string { - return "Cleaning up state before the next FROM" +func (c *CommandCleanup) String() string { + return "Cleaning up" } -func (c *CommandReset) Execute(b *Build) (State, error) { - state := b.state - state.imageID = "" - return state, nil +func (c *CommandCleanup) Execute(b *Build) (State, error) { + s := b.state + + if b.cfg.NoGarbage && !c.tagged && s.imageID != "" { + if err := b.client.RemoveImage(s.imageID); err != nil { + return s, err + } + } + + // For final cleanup we want to keep imageID + if !c.final { + s.imageID = "" + } + + return s, nil } // CommandCommit commits collected changes diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index fd92442d..47b135d6 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -31,6 +31,13 @@ func NewPlan(b *Build) (plan Plan, err error) { committed = true } + cleanup := func(i int) { + plan = append(plan, &CommandCleanup{ + final: i == len(commands)-1, + tagged: strings.Contains("tag push from", commands[i].name), + }) + } + alwaysCommitBefore := "run attach add copy tag push" alwaysCommitAfter := "run attach add copy" neverCommitAfter := "from tag push" @@ -50,7 +57,7 @@ func NewPlan(b *Build) (plan Plan, err error) { commit() } if i > 0 { - plan = append(plan, &CommandReset{}) + cleanup(i - 1) } } @@ -76,6 +83,11 @@ func NewPlan(b *Build) (plan Plan, err error) { commit() } } + + // Always cleanup at the end + if i == len(commands)-1 { + cleanup(i) + } } return plan, err diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build2/plan_test.go index 0290c28d..460164b0 100644 --- a/src/rocker/build2/plan_test.go +++ b/src/rocker/build2/plan_test.go @@ -29,6 +29,7 @@ FROM ubuntu expected := []Command{ &CommandFrom{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -47,6 +48,7 @@ RUN apt-get update &CommandFrom{}, &CommandRun{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -70,6 +72,7 @@ RUN apt-get update &CommandCommit{}, &CommandRun{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -88,6 +91,7 @@ ENV name=web &CommandFrom{}, &CommandEnv{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -104,8 +108,9 @@ FROM alpine expected := []Command{ &CommandFrom{}, - &CommandReset{}, + &CommandCleanup{}, &CommandFrom{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -125,8 +130,9 @@ FROM alpine &CommandFrom{}, &CommandEnv{}, &CommandCommit{}, - &CommandReset{}, + &CommandCleanup{}, &CommandFrom{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -147,10 +153,11 @@ ENV mode=run &CommandFrom{}, &CommandEnv{}, 
&CommandCommit{}, - &CommandReset{}, + &CommandCleanup{}, &CommandFrom{}, &CommandEnv{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -168,6 +175,7 @@ TAG my-build expected := []Command{ &CommandFrom{}, &CommandTag{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -188,6 +196,7 @@ TAG my-build &CommandEnv{}, &CommandCommit{}, &CommandTag{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -208,6 +217,7 @@ ENV type=web &CommandTag{}, &CommandEnv{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -226,8 +236,9 @@ FROM alpine expected := []Command{ &CommandFrom{}, &CommandTag{}, - &CommandReset{}, + &CommandCleanup{}, &CommandFrom{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -248,6 +259,7 @@ TAG my-build &CommandRun{}, &CommandCommit{}, &CommandTag{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -266,6 +278,7 @@ COPY rootfs / &CommandFrom{}, &CommandCopy{}, &CommandCommit{}, + &CommandCleanup{}, } assert.Len(t, p, len(expected)) @@ -274,6 +287,49 @@ COPY rootfs / } } +func TestPlan_CleanupTaggedFinal(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +TAG dev +`) + + // from, tag, cleanup + c := p[2] + + assert.IsType(t, &CommandCleanup{}, c) + assert.True(t, c.(*CommandCleanup).tagged) + assert.True(t, c.(*CommandCleanup).final) +} + +func TestPlan_CleanupNotTaggedFinal(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV foo=bar +`) + + // from, env, commit, cleanup + c := p[3] + + assert.IsType(t, &CommandCleanup{}, c) + assert.False(t, c.(*CommandCleanup).tagged) + assert.True(t, c.(*CommandCleanup).final) +} + +func TestPlan_CleanupNotTaggedMiddleFrom(t *testing.T) { + p := makePlan(t, ` +FROM ubuntu +ENV foo=bar +FROM alpine +`) + + // from, env, commit, cleanup, from, cleanup + c := p[3] + + assert.IsType(t, &CommandCleanup{}, c) + assert.False(t, c.(*CommandCleanup).tagged) + assert.False(t, c.(*CommandCleanup).final) +} + // internal helpers func makePlan(t *testing.T, rockerfileContent string) Plan { From 21741c48d2585b4524495096a57ca8791fdbc46c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 22 Sep 2015 06:57:28 +0300 Subject: [PATCH 023/131] report the size of FROM --- src/rocker/build2/commands.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 7391fd03..45f544d3 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -20,6 +20,8 @@ import ( "fmt" "strings" + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" ) @@ -70,11 +72,11 @@ func (c *CommandFrom) String() string { return c.cfg.original } -func (c *CommandFrom) Execute(b *Build) (state State, err error) { +func (c *CommandFrom) Execute(b *Build) (s State, err error) { // TODO: for "scratch" image we may use /images/create if len(c.cfg.args) != 1 { - return state, fmt.Errorf("FROM requires one argument") + return s, fmt.Errorf("FROM requires one argument") } var ( @@ -85,27 +87,35 @@ func (c *CommandFrom) Execute(b *Build) (state State, err error) { // If Pull is true, then img will remain nil and it will be pulled below if !b.cfg.Pull { if img, err = b.client.InspectImage(name); err != nil { - return state, err + return s, err } } if img == nil { if err = b.client.PullImage(name); err != nil { - return state, err + return s, err } if img, err = b.client.InspectImage(name); err != nil { - return state, err + 
return s, err } if img == nil { - return state, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) + return s, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) } } - state = b.state - state.imageID = img.ID - state.config = *img.Config + // We want to say the size of the FROM image. Better to do it + // from the client, but don't know how to do it better, + // without duplicating InspectImage calls and making unnecessary functions + + log.WithFields(log.Fields{ + "size": units.HumanSize(float64(img.VirtualSize)), + }).Infof(" | Image %.12s", img.ID) - return state, nil + s = b.state + s.imageID = img.ID + s.config = *img.Config + + return s, nil } // CommandReset cleans the builder state before the next FROM From a2d7aca1344535ad0a8f2e4168d89b7d360ca5c6 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 22 Sep 2015 07:05:36 +0300 Subject: [PATCH 024/131] improve build output --- src/rocker/build2/build.go | 2 +- src/rocker/build2/client.go | 10 +++++----- src/rocker/build2/commands.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index ed85bd3f..2c0d992a 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -66,7 +66,7 @@ func (b *Build) Run(plan Plan) (err error) { for k, c := range plan { log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) - log.Infof("Step %d: %s", k+1, c) + log.Infof("%s", c) if b.state, err = c.Execute(b); err != nil { return err diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index b44ae17a..4de43843 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -110,7 +110,7 @@ func (c *DockerClient) PullImage(name string) error { } func (c *DockerClient) RemoveImage(imageID string) error { - log.Infof(" | Remove image %.12s", imageID) + log.Infof("| Remove image %.12s", imageID) opts := docker.RemoveImageOptions{ Force: true, @@ -143,7 +143,7 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { return "", err } - log.Infof(" | Created container %.12s (image %.12s)", container.ID, s.imageID) + log.Infof("| Created container %.12s (image %.12s)", container.ID, s.imageID) return container.ID, nil } @@ -301,13 +301,13 @@ func (c *DockerClient) CommitContainer(s State, message string) (string, error) log.WithFields(log.Fields{ "size": size, - }).Infof(" | Result image is %.12s", image.ID) + }).Infof("| Result image is %.12s", image.ID) return image.ID, nil } func (c *DockerClient) RemoveContainer(containerID string) error { - log.Infof(" | Removing container %.12s", containerID) + log.Infof("| Removing container %.12s", containerID) opts := docker.RemoveContainerOptions{ ID: containerID, @@ -319,7 +319,7 @@ func (c *DockerClient) RemoveContainer(containerID string) error { } func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, path string) error { - log.Infof(" | Uploading files to container %.12s", containerID) + log.Infof("| Uploading files to container %.12s", containerID) opts := docker.UploadToContainerOptions{ InputStream: stream, diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 45f544d3..09a33cdf 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -109,7 +109,7 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { log.WithFields(log.Fields{ "size": units.HumanSize(float64(img.VirtualSize)), - }).Infof(" | Image %.12s", img.ID) + }).Infof("| Image %.12s", img.ID) s = b.state 
s.imageID = img.ID From 0003dcce93622ce93646c3a002b0a232efc9647f Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 07:49:56 +0300 Subject: [PATCH 025/131] COPY implemented MVP --- src/rocker/build2/build.go | 7 +- src/rocker/build2/client.go | 1 + src/rocker/build2/commands.go | 8 +- src/rocker/build2/commands_test.go | 11 +- src/rocker/build2/copy.go | 510 ++++++++++++---------------- src/rocker/build2/copy2.go | 154 --------- src/rocker/build2/copy2_test.go | 88 ----- src/rocker/build2/copy_test.go | 341 +++++++++++++++++++ src/rocker/build2/tar.go | 118 +++++++ src/rocker/build2/tar_unix.go | 57 ++++ src/rocker/build2/tar_windows.go | 56 +++ src/rocker/build2/testdata/file.txt | 1 - 12 files changed, 815 insertions(+), 537 deletions(-) delete mode 100644 src/rocker/build2/copy2.go delete mode 100644 src/rocker/build2/copy2_test.go create mode 100644 src/rocker/build2/copy_test.go create mode 100644 src/rocker/build2/tar.go create mode 100644 src/rocker/build2/tar_unix.go create mode 100644 src/rocker/build2/tar_windows.go delete mode 100644 src/rocker/build2/testdata/file.txt diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 2c0d992a..a5962c72 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -19,6 +19,8 @@ package build2 import ( "io" + "github.com/fatih/color" + "github.com/fsouza/go-dockerclient" "github.com/kr/pretty" @@ -42,7 +44,7 @@ type State struct { imageID string containerID string commitMsg []string - postCommit func(s State) (s1 State, err error) + skipCommit bool } type Build struct { @@ -66,7 +68,8 @@ func (b *Build) Run(plan Plan) (err error) { for k, c := range plan { log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) - log.Infof("%s", c) + log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c)) + // log.Infof("%s", color.New(color.FgBlue).SprintFunc()(c)) if b.state, err = c.Execute(b); err != nil { return err diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 4de43843..d52c783d 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -262,6 +262,7 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error return err } case <-sigch: + // TODO: Removing container twice for some reason log.Infof("Received SIGINT, remove current container...") if err := c.RemoveContainer(containerID); err != nil { log.Errorf("Failed to remove container: %s", err) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 09a33cdf..29dc1ca5 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -149,12 +149,18 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { type CommandCommit struct{} func (c *CommandCommit) String() string { - return "Commit layers" + return "Commit changes" } func (c *CommandCommit) Execute(b *Build) (s State, err error) { s = b.state + if s.skipCommit { + s.skipCommit = false + log.Infof("| Skip") + return s, nil + } + message := strings.Join(s.commitMsg, "; ") // Reset collected commit messages after the commit diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 330695d2..ce8a3633 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -213,6 +213,8 @@ func TestCommandCommit_NoCommitMsgs(t *testing.T) { assert.Contains(t, err.Error(), "Nothing to commit") } +// TODO: test skip commit + // =========== Testing ENV =========== func TestCommandEnv_Simple(t *testing.T) { @@ -284,7 
+286,7 @@ func TestCommandCopy_Simple(t *testing.T) { // TODO: do we need to check the dest is always a directory? b, c := makeBuild(t, "", Config{}) cmd := &CommandCopy{ConfigCommand{ - args: []string{"testdata/file.txt", "/file.txt"}, + args: []string{"testdata/Rockerfile", "/Rockerfile"}, }} c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { @@ -293,16 +295,17 @@ func TestCommandCopy_Simple(t *testing.T) { assert.True(t, len(arg.config.Cmd) > 0) }).Once() - c.On("UploadToContainer", "456", mock.AnythingOfType("*io.PipeReader"), "/file.txt").Return(nil).Once() + c.On("UploadToContainer", "456", mock.AnythingOfType("*io.PipeReader"), "/").Return(nil).Once() state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } - // assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.config.Cmd) - pretty.Println(state) + t.Logf("state: %# v", pretty.Formatter(state)) c.AssertExpectations(t) assert.Equal(t, "456", state.containerID) } + +// TODO: test Cleanup diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index 9574e3aa..dbcffb7f 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -1,40 +1,56 @@ -// NOTICE: it was originally grabbed from the docker source and -// adopted for use by rocker; see LICENSE in the current -// directory from the license and the copyright. -// -// Copyright 2013-2015 Docker, Inc. +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package build2 import ( - "crypto/sha256" - "encoding/hex" + "archive/tar" + "bufio" "fmt" "io" "io/ioutil" - "net/http" - "net/url" "os" "path/filepath" "strings" - "syscall" - "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/pkg/urlutil" - "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive" - "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/units" + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils" + "github.com/kr/pretty" + + log "github.com/Sirupsen/logrus" ) -type copyInfo struct { - origPath string - destPath string - hash string - decompress bool - tmpDir string +const buffer32K = 32 * 1024 + +type upload struct { + tar io.ReadCloser + size int64 + src string + files []*uploadFile + dest string +} + +type uploadFile struct { + src string + dest string + size int64 } -func copyCommand(b *Build, args []string, allowRemote bool, allowDecompression bool, cmdName string) (s State, err error) { +func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { s = b.state @@ -42,303 +58,224 @@ func copyCommand(b *Build, args []string, allowRemote bool, allowDecompression b return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) } - // Work in daemon-specific filepath semantics - dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + var ( + tarSum tarsum.TarSum + src = args[0 : len(args)-1] + dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest + u *upload - copyInfos := []*copyInfo{} + // TODO: read .dockerignore + excludes = []string{} + ) - // b.Config.Image = b.image - - defer func() { - for _, ci := range copyInfos { - if ci.tmpDir != "" { - os.RemoveAll(ci.tmpDir) - } - } - }() + if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil { + return s, err + } - // Loop through each src file and calculate the info we need to - // do the copy (e.g. hash value if cached). 
Don't actually do - // the copy until we've looked at all src files - for _, orig := range args[0 : len(args)-1] { - if err := calcCopyInfo( - b, - cmdName, - ©Infos, - orig, - dest, - allowRemote, - allowDecompression, - true, - ); err != nil { - return s, err - } + // skip COPY if no files matched + if len(u.files) == 0 { + log.Infof("| No files matched") + s.skipCommit = true + return s, nil } - if len(copyInfos) == 0 { - return s, fmt.Errorf("No source files were specified") + log.Infof("| Calculating tarsum for %d files (%s total)", len(u.files), units.HumanSize(float64(u.size))) + + if tarSum, err = tarsum.NewTarSum(u.tar, true, tarsum.Version1); err != nil { + return s, err } - if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { - return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return s, err } + u.tar.Close() - // For backwards compat, if there's just one CI then use it as the - // cache look-up string, otherwise hash 'em all into one - var srcHash string - // var origPaths string - - if len(copyInfos) == 1 { - srcHash = copyInfos[0].hash - // origPaths = copyInfos[0].origPath - } else { - var hashs []string - var origs []string - for _, ci := range copyInfos { - hashs = append(hashs, ci.hash) - origs = append(origs, ci.origPath) - } - hasher := sha256.New() - hasher.Write([]byte(strings.Join(hashs, ","))) - srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) - // origPaths = strings.Join(origs, " ") - } + // TODO: useful commit comment? - s.commitMsg = append(s.commitMsg, fmt.Sprintf("%s %s in %s", cmdName, srcHash, dest)) + message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest) + s.commitMsg = append(s.commitMsg, message) - // TODO: probe cache + origCmd := s.config.Cmd + s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} - // TODO: do the actual copy + if s.containerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } - // for _, ci := range copyInfos { - // if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil { - // return err - // } - // } + s.config.Cmd = origCmd + + // We need to make a new tar stream, because the previous one has been + // read by the tarsum; maybe, optimize this in future + if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil { + return s, err + } + + // Copy to "/" because we made the prefix inside the tar archive + // Do that because we are not able to reliably create directories inside the container + if err = b.client.UploadToContainer(s.containerID, u.tar, "/"); err != nil { + return s, err + } return s, nil } -func calcCopyInfo(b *Build, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error { - - // Work in daemon-specific OS filepath semantics. However, we save - // the the origPath passed in here, as it might also be a URL which - // we need to check for in this function. 
- passedInOrigPath := origPath - origPath = filepath.FromSlash(origPath) - destPath = filepath.FromSlash(destPath) +func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) (u *upload, err error) { - if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { - origPath = origPath[1:] + u = &upload{ + src: srcPath, + dest: dest, } - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) - // Twiddle the destPath when its a relative path - meaning, make it - // relative to the WORKINGDIR - if !filepath.IsAbs(destPath) { - hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) - destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.state.config.WorkingDir), destPath) + if u.files, err = listFiles(srcPath, includes, excludes); err != nil { + return u, err + } - // Make sure we preserve any trailing slash - if hasSlash { - destPath += string(os.PathSeparator) - } + // Calculate total size + for _, f := range u.files { + u.size += f.size } - // In the remote/URL case, download it and gen its hashcode - if urlutil.IsURL(passedInOrigPath) { + sep := string(os.PathSeparator) - // As it's a URL, we go back to processing on what was passed in - // to this function - origPath = passedInOrigPath + if len(u.files) == 0 { + return u, nil + } - if !allowRemote { - return fmt.Errorf("Source can't be a URL for %s", cmdName) + // If destination is not a directory (no leading slash) + if !strings.HasSuffix(u.dest, sep) { + if len(u.files) > 1 { + return u, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } + // If we transfer a single file and the destination is not a directory, + // then rename it and remove prefix + u.files[0].dest = strings.TrimLeft(u.dest, sep) + u.dest = "" + } - ci := copyInfo{} - ci.origPath = origPath - ci.hash = origPath // default to this but can change - ci.destPath = destPath - ci.decompress = false - *cInfos = append(*cInfos, &ci) + // Cut the slash prefix from the dest, because it will be the root of the tar + // the archive will be always uploaded to the root of a container + if strings.HasPrefix(u.dest, sep) { + u.dest = u.dest[1:] + } - // Initiate the download - resp, err := httputils.Download(ci.origPath) - if err != nil { - return err - } + log.Debugf("Making archive prefix=%s %# v", u.dest, pretty.Formatter(u)) - // Create a tmp dir - tmpDirName, err := ioutil.TempDir(b.cfg.ContextDir, "docker-remote") - if err != nil { - return err - } - ci.tmpDir = tmpDirName + pipeReader, pipeWriter := io.Pipe() + u.tar = pipeReader - // Create a tmp file within our tmp dir - tmpFileName := filepath.Join(tmpDirName, "tmp") - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(pipeWriter), + Buffer: bufio.NewWriterSize(nil, buffer32K), + SeenFiles: make(map[uint64]string), } - // Download and dump result to tmp file - // TODO: adopt Docker's progressreader? 
- if _, err := io.Copy(tmpFile, resp.Body); err != nil { - tmpFile.Close() - return err - } - tmpFile.Close() - - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - times := make([]syscall.Timespec, 2) - - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - mTime, err := http.ParseTime(lastMod) - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if err == nil { - times[1] = syscall.NsecToTimespec(mTime.UnixNano()) + defer func() { + if err := ta.TarWriter.Close(); err != nil { + log.Errorf("Failed to close tar writer, error: %s", err) } - } + if err := pipeWriter.Close(); err != nil { + log.Errorf("Failed to close pipe writer, error: %s", err) + } + }() - if err := system.UtimesNano(tmpFileName, times); err != nil { - return err + // write files to tar + for _, f := range u.files { + ta.addTarFile(f.src, u.dest+f.dest) } + }() - ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + return u, nil +} - // If the destination is a directory, figure out the filename. - if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) { - u, err := url.Parse(origPath) - if err != nil { - return err - } - path := u.Path - if strings.HasSuffix(path, string(os.PathSeparator)) { - path = path[:len(path)-1] - } - parts := strings.Split(path, string(os.PathSeparator)) - filename := parts[len(parts)-1] - if filename == "" { - return fmt.Errorf("cannot determine filename from url: %s", u) - } - ci.destPath = ci.destPath + filename - } +func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, error) { - // Calc the checksum, even if we're using the cache - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return err - } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) - if err != nil { - return err - } - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - return err - } - ci.hash = tarSum.Sum(nil) - r.Close() + result := []*uploadFile{} + seen := map[string]struct{}{} - return nil - } + // TODO: support urls + // TODO: support local archives (and maybe a remote archives as well) - // TODO: Deal with wildcards - // if allowWildcards && containsWildcards(origPath) { - // for _, fileInfo := range b.context.GetSums() { - // if fileInfo.Name() == "" { - // continue - // } - // match, _ := filepath.Match(origPath, fileInfo.Name()) - // if !match { - // continue - // } - - // // Note we set allowWildcards to false in case the name has - // // a * in it - // calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false) - // } - // return nil - // } - - // Must be a dir or a file - - if err := checkPathForAddition(b, origPath); err != nil { - return err - } - fi, _ := os.Stat(filepath.Join(b.cfg.ContextDir, origPath)) - - ci := copyInfo{} - ci.origPath = origPath - ci.hash = origPath - ci.destPath = destPath - ci.decompress = allowDecompression - *cInfos = append(*cInfos, &ci) - - // Deal with the single file case - if !fi.IsDir() { - r, err := archive.Tar(ci.origPath, archive.Uncompressed) - if err != nil { - return err - } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + for _, pattern := range includes { + + matches, err := filepath.Glob(filepath.Join(srcPath, pattern)) if err != nil { - return err + return result, err } - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - return err + + for _, match := range matches { + + // We need to 
check if the current match is dir + // to prefix files inside with it + matchInfo, err := os.Stat(match) + if err != nil { + return result, err + } + + // Walk through each match since it may be a directory + err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error { + + relFilePath, err := filepath.Rel(srcPath, path) + if err != nil { + return err + } + + // TODO: ensure explicit include does not get excluded by the following rule + // TODO: ensure ignoring works correctly, maybe improve .dockerignore to work more like .gitignore? + + skip, err := fileutils.Matches(relFilePath, excludes) + if err != nil { + return err + } + if skip { + return nil + } + + // TODO: read links? + + // skip checking if symlinks point to non-existing file + // also skip named pipes, because they hanging on open + if info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + // not interested in dirs, since we walk already + if info.IsDir() { + return nil + } + + if _, ok := seen[relFilePath]; ok { + return nil + } + seen[relFilePath] = struct{}{} + + // cut the wildcard path of the file or use base name + var resultFilePath string + if containsWildcards(pattern) { + common := commonPrefix(pattern, relFilePath) + resultFilePath = strings.Replace(relFilePath, common, "", 1) + } else if matchInfo.IsDir() { + common := commonPrefix(pattern, match) + resultFilePath = strings.Replace(relFilePath, common, "", 1) + } else { + resultFilePath = filepath.Base(relFilePath) + } + + result = append(result, &uploadFile{ + src: path, + dest: resultFilePath, + size: info.Size(), + }) + + return nil + }) + + if err != nil { + return result, err + } } - ci.hash = "file:" + tarSum.Sum(nil) - r.Close() - - // This will match first file in sums of the archive - // fis := b.context.GetSums().GetFile(ci.origPath) - // if fis != nil { - // ci.hash = "file:" + fis.Sum() - // } - return nil } - // TODO: tarsum for dirs - // NewTarWithOptions might do the trick - - // Must be a dir - // var subfiles []string - // absOrigPath := filepath.Join(b.cfg.ContextDir, ci.origPath) - - // // Add a trailing / to make sure we only pick up nested files under - // // the dir and not sibling files of the dir that just happen to - // // start with the same chars - // if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) { - // absOrigPath += string(os.PathSeparator) - // } - - // // Need path w/o slash too to find matching dir w/o trailing slash - // absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] - - // for _, fileInfo := range b.context.GetSums() { - // absFile := filepath.Join(b.contextPath, fileInfo.Name()) - // // Any file in the context that starts with the given path will be - // // picked up and its hashcode used. However, we'll exclude the - // // root dir itself. We do this for a coupel of reasons: - // // 1 - ADD/COPY will not copy the dir itself, just its children - // // so there's no reason to include it in the hash calc - // // 2 - the metadata on the dir will change when any child file - // // changes. This will lead to a miss in the cache check if that - // // child file is in the .dockerignore list. 
- // if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash { - // subfiles = append(subfiles, fileInfo.Sum()) - // } - // } - // sort.Strings(subfiles) - // hasher := sha256.New() - // hasher.Write([]byte(strings.Join(subfiles, ","))) - // ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) - - return nil + return result, nil } func containsWildcards(name string) bool { @@ -353,20 +290,19 @@ func containsWildcards(name string) bool { return false } -func checkPathForAddition(b *Build, orig string) error { - origPath := filepath.Join(b.cfg.ContextDir, orig) - origPath, err := filepath.EvalSymlinks(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err +func commonPrefix(a, b string) (prefix string) { + // max length of either a or b + l := len(a) + if len(b) > l { + l = len(b) } - if _, err := os.Stat(origPath); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) + // find common prefix + for i := 0; i < l; i++ { + if a[i] != b[i] { + break } - return err + // not optimal, but I don't care + prefix = prefix + string(a[i]) } - return nil + return } diff --git a/src/rocker/build2/copy2.go b/src/rocker/build2/copy2.go deleted file mode 100644 index f174891f..00000000 --- a/src/rocker/build2/copy2.go +++ /dev/null @@ -1,154 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build2 - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/tarsum" - "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/archive" - "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils" -) - -func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { - - s = b.state - - if len(args) < 2 { - return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) - } - - // TODO: do we need to check the dest is always a directory? 
- - var ( - tar io.ReadCloser - tarSum tarsum.TarSum - src = args[0 : len(args)-1] - dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest - - // TODO: read .dockerignore - excludes = []string{} - ) - - if tar, err = makeTarStream(b.cfg.ContextDir, src, excludes); err != nil { - return s, err - } - - if tarSum, err = tarsum.NewTarSum(tar, true, tarsum.Version1); err != nil { - return s, err - } - if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { - return s, err - } - tar.Close() - - message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest) - s.commitMsg = append(s.commitMsg, message) - - origCmd := s.config.Cmd - s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} - - if s.containerID, err = b.client.CreateContainer(s); err != nil { - return s, err - } - - s.config.Cmd = origCmd - - // We need to make a new tar stream, because the previous one has been - // read by the tarsum; maybe, optimize this in future - if tar, err = makeTarStream(b.cfg.ContextDir, src, excludes); err != nil { - return s, err - } - defer tar.Close() - - if err = b.client.UploadToContainer(s.containerID, tar, dest); err != nil { - return s, err - } - - return s, nil -} - -func makeTarStream(srcPath string, includes, excludes []string) (tar io.ReadCloser, err error) { - - if includes, err = expandIncludes(srcPath, includes, excludes); err != nil { - return nil, err - } - - tarOpts := &archive.TarOptions{ - IncludeFiles: includes, - ExcludePatterns: excludes, - Compression: archive.Uncompressed, - NoLchown: true, - } - - return archive.TarWithOptions(srcPath, tarOpts) -} - -func expandIncludes(srcPath string, includes, excludes []string) (result []string, err error) { - result = []string{} - - for _, filePath := range includes { - - matches, err := filepath.Glob(filepath.Join(srcPath, filePath)) - if err != nil { - return result, err - } - - for _, match := range matches { - - relFilePath, err := filepath.Rel(srcPath, match) - if err != nil { - return result, err - } - - skip, err := fileutils.Matches(relFilePath, excludes) - if err != nil { - return result, err - } - if skip { - continue - } - - f, err := os.Stat(match) - if err != nil { - return result, err - } - - // skip checking if symlinks point to non-existing file - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - continue - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return result, fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - - result = append(result, relFilePath) - } - } - - return result, nil -} diff --git a/src/rocker/build2/copy2_test.go b/src/rocker/build2/copy2_test.go deleted file mode 100644 index c4c36cc8..00000000 --- a/src/rocker/build2/copy2_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package build2 - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/pkg/tarsum" - "github.com/kr/pretty" -) - -func TestMakeTarStream_Basic(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - includes := []string{ - "testdata/*.txt", - } - excludes := []string{ - "testdata/*.tar", - "testdata/*.txt2", - } - - stream, err := makeTarStream(wd, includes, excludes) - if err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(stream) - if err != nil { - t.Fatal(err) - } - - tarSum, err := tarsum.NewTarSum(bytes.NewReader(data), true, tarsum.Version1) - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { - t.Fatal(err) - } - println("tarsum:" + tarSum.Sum(nil)) - - if err := ioutil.WriteFile("testdata/file.tar", data, 0644); err != nil { - t.Fatal(err) - } - - println("Written to testdata/file.tar") - -} - -func TestExpandIncludes_Basic(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - includes := []string{ - "testdata", - } - excludes := []string{ - "testdata/*.tar", - "testdata/*.txt2", - } - - matches, err := expandIncludes(wd, includes, excludes) - if err != nil { - t.Fatal(err) - } - - pretty.Println(matches) -} diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build2/copy_test.go new file mode 100644 index 00000000..98216b73 --- /dev/null +++ b/src/rocker/build2/copy_test.go @@ -0,0 +1,341 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "os/exec" + "rocker/test" + "strings" + "testing" + + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/pkg/tarsum" +) + +func TestListFiles_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "file1.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "file1.txt", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/file1.txt", "file1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestListFiles_Wildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "file1.txt": "hello", + "file2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*.txt", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/file1.txt", "file1.txt"}, + {tmpDir + "/file2.txt", "file2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestListFiles_Dir_Simple(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "dir/foo.txt": "hello", + "dir/bar.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "dir", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/dir/bar.txt", "dir/bar.txt"}, + {tmpDir + "/dir/foo.txt", "dir/foo.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestListFiles_Dir_AndFiles(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test.txt": "hello", + "dir/foo.txt": "hello", + "dir/bar.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/dir/bar.txt", "dir/bar.txt"}, + {tmpDir + "/dir/foo.txt", "dir/foo.txt"}, + {tmpDir + "/test.txt", "test.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", 
i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestListFiles_Dir_Multi(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a", + "b/2.txt", + "c", + } + excludes := []string{} + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test.txt", "a/test.txt"}, + {tmpDir + "/b/2.txt", "2.txt"}, + {tmpDir + "/c/foo.txt", "c/foo.txt"}, + {tmpDir + "/c/x/1.txt", "c/x/1.txt"}, + {tmpDir + "/c/x/2.txt", "c/x/2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestMakeTarStream_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a", + "b/2.txt", + "c", + } + excludes := []string{} + dest := "/" + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "a/test.txt", + "2.txt", + "c/foo.txt", + "c/x/1.txt", + "c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestMakeTarStream_Rename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a/test.txt", + } + excludes := []string{} + dest := "/src/x.txt" + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/x.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestMakeTarStream_OneFileToDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a/test.txt", + } + excludes := []string{} + dest := "/src/" + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/test.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +// helper functions + +func makeTmpDir(t *testing.T, files map[string]string) string { + tmpDir, err := ioutil.TempDir("", "rocker-copy-test") + if err != nil { + t.Fatal(err) + } + if err := test.MakeFiles(tmpDir, files); err != nil { + os.RemoveAll(tmpDir) + t.Fatal(err) + } + t.Logf("temp files: %# v", pretty.Formatter(files)) + return tmpDir +} + +func writeReadTar(t *testing.T, tmpDir string, tarStream io.ReadCloser) string { + data, err := ioutil.ReadAll(tarStream) + if err != nil { + t.Fatal(err) + } + defer tarStream.Close() + + tarSum, err := tarsum.NewTarSum(bytes.NewReader(data), true, 
tarsum.Version1) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(ioutil.Discard, tarSum); err != nil { + t.Fatal(err) + } + t.Logf("tarsum: %s", tarSum.Sum(nil)) + + if err := ioutil.WriteFile(tmpDir+"/archive.tar", data, 0644); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir + "/archive.tar") + + cmd := exec.Command("tar", "-tf", tmpDir+"/archive.tar") + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + return string(out) +} diff --git a/src/rocker/build2/tar.go b/src/rocker/build2/tar.go new file mode 100644 index 00000000..22cf80a9 --- /dev/null +++ b/src/rocker/build2/tar.go @@ -0,0 +1,118 @@ +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build2 + +import ( + "archive/tar" + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/system" +) + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. +func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's a regular file and has more than 1 link, + // it's hardlinked, so set the type flag accordingly + if fi.Mode().IsRegular() && nlink > 1 { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! 
+ } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg { + file, err := os.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} diff --git a/src/rocker/build2/tar_unix.go b/src/rocker/build2/tar_unix.go new file mode 100644 index 00000000..ee64d392 --- /dev/null +++ b/src/rocker/build2/tar_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build2 + +import ( + "archive/tar" + "errors" + "os" + "syscall" +) + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + nlink = uint32(s.Nlink) + inode = uint64(s.Ino) + + // Currently go does not fil in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/src/rocker/build2/tar_windows.go b/src/rocker/build2/tar_windows.go new file mode 100644 index 00000000..8229b728 --- /dev/null +++ b/src/rocker/build2/tar_windows.go @@ -0,0 +1,56 @@ +// +build windows + +// NOTICE: +// it was originally grabbed from the docker source and +// adopted for use by rocker; see LICENSE in the current +// directory from the license and the copyright. +// +// Copyright 2013-2015 Docker, Inc. + +package build2 + +import ( + "archive/tar" + "fmt" + "os" + "strings" +) + +// canonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { + // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} diff --git a/src/rocker/build2/testdata/file.txt b/src/rocker/build2/testdata/file.txt deleted file mode 100644 index ce013625..00000000 --- a/src/rocker/build2/testdata/file.txt +++ /dev/null @@ -1 +0,0 @@ -hello From a4b236f8f75dd6285064d2444183003731a9fbf9 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 07:51:18 +0300 Subject: [PATCH 026/131] ADD as an alias of COPY --- src/rocker/build2/commands.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 29dc1ca5..ac2da42c 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -57,6 +57,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandTag{cfg}, nil case "copy": return &CommandCopy{cfg}, nil + case "add": + return &CommandAdd{cfg}, nil case "cmd": return &CommandCmd{cfg}, nil } @@ -344,3 +346,20 @@ func (c *CommandCopy) Execute(b *Build) (State, error) { } return copyFiles(b, c.cfg.args, "COPY") } + +// CommandCopy implements ADD +// For now it is an alias of COPY, but later will add urls and archives to it +type CommandAdd struct { + cfg ConfigCommand +} + +func (c *CommandAdd) String() string { + return c.cfg.original +} + +func (c *CommandAdd) Execute(b *Build) (State, error) { + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("ADD requires at least two arguments") + } + return copyFiles(b, c.cfg.args, "ADD") +} From 09dbbd003374a059cf3d8777866181482b8483cb Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:01:58 +0300 Subject: [PATCH 027/131] LABEL impl --- src/rocker/build2/commands.go | 46 ++++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 53 ++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index ac2da42c..9c3646a1 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -53,6 +53,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandRun{cfg}, nil case "env": return &CommandEnv{cfg}, nil + case "label": + return &CommandLabel{cfg}, nil case "tag": return &CommandTag{cfg}, nil case "copy": @@ -288,6 +290,50 @@ func (c *CommandEnv) Execute(b *Build) (s State, err error) { return s, nil } +// CommandEnv implements LABEL +type CommandLabel struct { + cfg ConfigCommand +} + +func (c *CommandLabel) String() string { + return c.cfg.original +} + +func (c *CommandLabel) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("LABEL requires at least one argument") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to LABEL, too many args") + } + + commitStr := "LABEL" + + if s.config.Labels == 
nil { + s.config.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + s.config.Labels[args[j]] = args[j+1] + j++ + } + + s.commitMsg = append(s.commitMsg, commitStr) + + return s, nil +} + // CommandCmd implements CMD type CommandCmd struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index ce8a3633..c2214129 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -17,6 +17,7 @@ package build2 import ( + "reflect" "testing" "github.com/kr/pretty" @@ -249,6 +250,58 @@ func TestCommandEnv_Advanced(t *testing.T) { assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.config.Env) } +// =========== Testing LABEL =========== + +func TestCommandLabel_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandLabel{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedLabels := map[string]string{ + "type": "web", + "env": "prod", + } + + t.Logf("Result labels: %# v", pretty.Formatter(state.config.Labels)) + + assert.Equal(t, []string{"LABEL type=web env=prod"}, state.commitMsg) + assert.True(t, reflect.DeepEqual(state.config.Labels, expectedLabels), "bad result labels") +} + +func TestCommandLabel_Advanced(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandLabel{ConfigCommand{ + args: []string{"type", "web", "env", "prod"}, + }} + + b.state.config.Labels = map[string]string{ + "env": "dev", + "version": "1.2.3", + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedLabels := map[string]string{ + "type": "web", + "version": "1.2.3", + "env": "prod", + } + + t.Logf("Result labels: %# v", pretty.Formatter(state.config.Labels)) + + assert.Equal(t, []string{"LABEL type=web env=prod"}, state.commitMsg) + assert.True(t, reflect.DeepEqual(state.config.Labels, expectedLabels), "bad result labels") +} + // =========== Testing CMD =========== func TestCommandCmd_Simple(t *testing.T) { From da0aa93c6798f577b8f8a9ef34c087b2316d418b Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:22:57 +0300 Subject: [PATCH 028/131] refactor committing and State --- src/rocker/build2/build.go | 20 ++++--- src/rocker/build2/client.go | 10 ++-- src/rocker/build2/commands.go | 87 ++++++++++++++++-------------- src/rocker/build2/commands_test.go | 84 ++++++++++++++--------------- src/rocker/build2/copy.go | 14 ++--- 5 files changed, 116 insertions(+), 99 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index a5962c72..91d66018 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -40,11 +40,11 @@ type Config struct { } type State struct { - config docker.Config - imageID string - containerID string - commitMsg []string - skipCommit bool + Config docker.Config + ImageID string + ContainerID string + CommitMsg []string + ProducedImage bool } type Build struct { @@ -86,5 +86,13 @@ func (b *Build) GetState() State { } func (b *Build) GetImageID() string { - return b.state.imageID + return b.state.ImageID +} + +func (s *State) Commit(msg string) { + s.CommitMsg = append(s.CommitMsg, msg) +} + +func (s *State) SkipCommit() { + s.Commit(COMMIT_SKIP) } diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index d52c783d..f519730d 
100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -124,12 +124,12 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { // volumesFrom := builder.getMountContainerIds() // binds := builder.getBinds() - s.config.Image = s.imageID + s.Config.Image = s.ImageID // TODO: assign human readable name? opts := docker.CreateContainerOptions{ - Config: &s.config, + Config: &s.Config, HostConfig: &docker.HostConfig{ // Binds: binds, // VolumesFrom: volumesFrom, @@ -143,7 +143,7 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { return "", err } - log.Infof("| Created container %.12s (image %.12s)", container.ID, s.imageID) + log.Infof("| Created container %.12s (image %.12s)", container.ID, s.ImageID) return container.ID, nil } @@ -276,9 +276,9 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error func (c *DockerClient) CommitContainer(s State, message string) (string, error) { commitOpts := docker.CommitContainerOptions{ - Container: s.containerID, + Container: s.ContainerID, Message: message, - Run: &s.config, + Run: &s.Config, } log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts)) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 9c3646a1..1b975696 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -25,6 +25,10 @@ import ( "github.com/fsouza/go-dockerclient" ) +const ( + COMMIT_SKIP = "COMMIT_SKIP" +) + type ConfigCommand struct { name string args []string @@ -116,8 +120,8 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { }).Infof("| Image %.12s", img.ID) s = b.state - s.imageID = img.ID - s.config = *img.Config + s.ImageID = img.ID + s.Config = *img.Config return s, nil } @@ -135,15 +139,15 @@ func (c *CommandCleanup) String() string { func (c *CommandCleanup) Execute(b *Build) (State, error) { s := b.state - if b.cfg.NoGarbage && !c.tagged && s.imageID != "" { - if err := b.client.RemoveImage(s.imageID); err != nil { + if b.cfg.NoGarbage && !c.tagged && s.ImageID != "" && s.ProducedImage { + if err := b.client.RemoveImage(s.ImageID); err != nil { return s, err } } // For final cleanup we want to keep imageID if !c.final { - s.imageID = "" + s.ImageID = "" } return s, nil @@ -159,41 +163,46 @@ func (c *CommandCommit) String() string { func (c *CommandCommit) Execute(b *Build) (s State, err error) { s = b.state - if s.skipCommit { - s.skipCommit = false - log.Infof("| Skip") - return s, nil + // Collect commits that are not skipped + commits := []string{} + for _, msg := range s.CommitMsg { + if msg != COMMIT_SKIP { + commits = append(commits, msg) + } } - message := strings.Join(s.commitMsg, "; ") - // Reset collected commit messages after the commit - s.commitMsg = []string{} + s.CommitMsg = []string{} - if s.containerID == "" { - if message == "" { - return s, fmt.Errorf("Nothing to commit, this might be a bug.") - } + if len(commits) == 0 { + log.Infof("| Skip") + return s, nil + } + + message := strings.Join(commits, "; ") - origCmd := s.config.Cmd - s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + if s.ContainerID == "" { + origCmd := s.Config.Cmd + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} - if s.containerID, err = b.client.CreateContainer(s); err != nil { + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } - s.config.Cmd = origCmd + s.Config.Cmd = origCmd } - if s.imageID, err = b.client.CommitContainer(s, message); err != nil { + if s.ImageID, 
err = b.client.CommitContainer(s, message); err != nil { return s, err } - if err = b.client.RemoveContainer(s.containerID); err != nil { + s.ProducedImage = true + + if err = b.client.RemoveContainer(s.ContainerID); err != nil { return s, err } - s.containerID = "" + s.ContainerID = "" return s, nil } @@ -210,7 +219,7 @@ func (c *CommandRun) String() string { func (c *CommandRun) Execute(b *Build) (s State, err error) { s = b.state - if s.imageID == "" { + if s.ImageID == "" { return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") } @@ -223,19 +232,19 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { // TODO: test with ENTRYPOINT // We run this command in the container using CMD - origCmd := s.config.Cmd - s.config.Cmd = cmd + origCmd := s.Config.Cmd + s.Config.Cmd = cmd - if s.containerID, err = b.client.CreateContainer(s); err != nil { + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } - if err = b.client.RunContainer(s.containerID, false); err != nil { + if err = b.client.RunContainer(s.ContainerID, false); err != nil { return s, err } // Restore command after commit - s.config.Cmd = origCmd + s.Config.Cmd = origCmd return s, nil } @@ -272,20 +281,20 @@ func (c *CommandEnv) Execute(b *Build) (s State, err error) { commitStr += " " + newVar gotOne := false - for i, envVar := range s.config.Env { + for i, envVar := range s.Config.Env { envParts := strings.SplitN(envVar, "=", 2) if envParts[0] == args[j] { - s.config.Env[i] = newVar + s.Config.Env[i] = newVar gotOne = true break } } if !gotOne { - s.config.Env = append(s.config.Env, newVar) + s.Config.Env = append(s.Config.Env, newVar) } } - s.commitMsg = append(s.commitMsg, commitStr) + s.Commit(commitStr) return s, nil } @@ -315,8 +324,8 @@ func (c *CommandLabel) Execute(b *Build) (s State, err error) { commitStr := "LABEL" - if s.config.Labels == nil { - s.config.Labels = map[string]string{} + if s.Config.Labels == nil { + s.Config.Labels = map[string]string{} } for j := 0; j < len(args); j++ { @@ -325,11 +334,11 @@ func (c *CommandLabel) Execute(b *Build) (s State, err error) { newVar := args[j] + "=" + args[j+1] + "" commitStr += " " + newVar - s.config.Labels[args[j]] = args[j+1] + s.Config.Labels[args[j]] = args[j+1] j++ } - s.commitMsg = append(s.commitMsg, commitStr) + s.Commit(commitStr) return s, nil } @@ -352,9 +361,9 @@ func (c *CommandCmd) Execute(b *Build) (s State, err error) { cmd = append([]string{"/bin/sh", "-c"}, cmd...) } - s.config.Cmd = cmd + s.Config.Cmd = cmd - s.commitMsg = append(s.commitMsg, fmt.Sprintf("CMD %q", cmd)) + s.Commit(fmt.Sprintf("CMD %q", cmd)) // TODO: unsetting CMD? 
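
// Illustrative, standalone sketch (not part of the patch) of the commit-message
// flow introduced by this refactor: commands queue messages on the state, a
// command with nothing to commit queues the COMMIT_SKIP marker instead, and the
// commit step drops the markers before joining whatever is left. The local
// names below are stand-ins for the State fields and constant above.
package main

import (
	"fmt"
	"strings"
)

const commitSkip = "COMMIT_SKIP"

func main() {
	commitMsg := []string{"ENV type=web", commitSkip, `CMD ["/bin/sh"]`}

	kept := []string{}
	for _, msg := range commitMsg {
		if msg != commitSkip {
			kept = append(kept, msg)
		}
	}

	if len(kept) == 0 {
		// Nothing but skip markers: the build step is not committed at all.
		fmt.Println("| Skip")
		return
	}

	fmt.Println(strings.Join(kept, "; ")) // ENV type=web; CMD ["/bin/sh"]
}
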
// if len(args) != 0 { diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index c2214129..28332ddd 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -50,8 +50,8 @@ func TestCommandFrom_Existing(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, "123", state.imageID) - assert.Equal(t, "localhost", state.config.Hostname) + assert.Equal(t, "123", state.ImageID) + assert.Equal(t, "localhost", state.Config.Hostname) } func TestCommandFrom_PullExisting(t *testing.T) { @@ -76,8 +76,8 @@ func TestCommandFrom_PullExisting(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, "123", state.imageID) - assert.Equal(t, "localhost", state.config.Hostname) + assert.Equal(t, "123", state.ImageID) + assert.Equal(t, "localhost", state.Config.Hostname) } func TestCommandFrom_NotExisting(t *testing.T) { @@ -103,7 +103,7 @@ func TestCommandFrom_NotExisting(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, "123", state.imageID) + assert.Equal(t, "123", state.ImageID) } func TestCommandFrom_AfterPullNotExisting(t *testing.T) { @@ -131,12 +131,12 @@ func TestCommandRun_Simple(t *testing.T) { }} origCmd := []string{"/bin/program"} - b.state.config.Cmd = origCmd - b.state.imageID = "123" + b.state.Config.Cmd = origCmd + b.state.ImageID = "123" c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) - assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.config.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "whoami"}, arg.Config.Cmd) }).Once() c.On("RunContainer", "456", false).Return(nil).Once() @@ -147,10 +147,10 @@ func TestCommandRun_Simple(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCmd, b.state.config.Cmd) - assert.Equal(t, origCmd, state.config.Cmd) - assert.Equal(t, "123", state.imageID) - assert.Equal(t, "456", state.containerID) + assert.Equal(t, origCmd, b.state.Config.Cmd) + assert.Equal(t, origCmd, state.Config.Cmd) + assert.Equal(t, "123", state.ImageID) + assert.Equal(t, "456", state.ContainerID) } // =========== Testing COMMIT =========== @@ -160,8 +160,8 @@ func TestCommandCommit_Simple(t *testing.T) { cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} - b.state.containerID = "456" - b.state.commitMsg = origCommitMsg + b.state.ContainerID = "456" + b.state.CommitMsg = origCommitMsg c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() c.On("RemoveContainer", "456").Return(nil).Once() @@ -172,11 +172,11 @@ func TestCommandCommit_Simple(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCommitMsg, b.state.commitMsg) - assert.Equal(t, []string{}, state.commitMsg) - assert.Equal(t, []string(nil), state.config.Cmd) - assert.Equal(t, "789", state.imageID) - assert.Equal(t, "", state.containerID) + assert.Equal(t, origCommitMsg, b.state.CommitMsg) + assert.Equal(t, []string{}, state.CommitMsg) + assert.Equal(t, []string(nil), state.Config.Cmd) + assert.Equal(t, "789", state.ImageID) + assert.Equal(t, "", state.ContainerID) } func TestCommandCommit_NoContainer(t *testing.T) { @@ -184,11 +184,11 @@ func TestCommandCommit_NoContainer(t *testing.T) { cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} - b.state.commitMsg = origCommitMsg + b.state.CommitMsg = origCommitMsg c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) - assert.Equal(t, []string{"/bin/sh", "-c", 
"#(nop) a; b"}, arg.config.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.Config.Cmd) }).Once() c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() @@ -200,10 +200,10 @@ func TestCommandCommit_NoContainer(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCommitMsg, b.state.commitMsg) - assert.Equal(t, []string{}, state.commitMsg) - assert.Equal(t, "789", state.imageID) - assert.Equal(t, "", state.containerID) + assert.Equal(t, origCommitMsg, b.state.CommitMsg) + assert.Equal(t, []string{}, state.CommitMsg) + assert.Equal(t, "789", state.ImageID) + assert.Equal(t, "", state.ContainerID) } func TestCommandCommit_NoCommitMsgs(t *testing.T) { @@ -211,7 +211,7 @@ func TestCommandCommit_NoCommitMsgs(t *testing.T) { cmd := &CommandCommit{} _, err := cmd.Execute(b) - assert.Contains(t, err.Error(), "Nothing to commit") + assert.Nil(t, err) } // TODO: test skip commit @@ -229,8 +229,8 @@ func TestCommandEnv_Simple(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) - assert.Equal(t, []string{"type=web", "env=prod"}, state.config.Env) + assert.Equal(t, []string{"ENV type=web env=prod"}, state.CommitMsg) + assert.Equal(t, []string{"type=web", "env=prod"}, state.Config.Env) } func TestCommandEnv_Advanced(t *testing.T) { @@ -239,15 +239,15 @@ func TestCommandEnv_Advanced(t *testing.T) { args: []string{"type", "web", "env", "prod"}, }} - b.state.config.Env = []string{"env=dev", "version=1.2.3"} + b.state.Config.Env = []string{"env=dev", "version=1.2.3"} state, err := cmd.Execute(b) if err != nil { t.Fatal(err) } - assert.Equal(t, []string{"ENV type=web env=prod"}, state.commitMsg) - assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.config.Env) + assert.Equal(t, []string{"ENV type=web env=prod"}, state.CommitMsg) + assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.Config.Env) } // =========== Testing LABEL =========== @@ -268,10 +268,10 @@ func TestCommandLabel_Simple(t *testing.T) { "env": "prod", } - t.Logf("Result labels: %# v", pretty.Formatter(state.config.Labels)) + t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) - assert.Equal(t, []string{"LABEL type=web env=prod"}, state.commitMsg) - assert.True(t, reflect.DeepEqual(state.config.Labels, expectedLabels), "bad result labels") + assert.Equal(t, []string{"LABEL type=web env=prod"}, state.CommitMsg) + assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") } func TestCommandLabel_Advanced(t *testing.T) { @@ -280,7 +280,7 @@ func TestCommandLabel_Advanced(t *testing.T) { args: []string{"type", "web", "env", "prod"}, }} - b.state.config.Labels = map[string]string{ + b.state.Config.Labels = map[string]string{ "env": "dev", "version": "1.2.3", } @@ -296,10 +296,10 @@ func TestCommandLabel_Advanced(t *testing.T) { "env": "prod", } - t.Logf("Result labels: %# v", pretty.Formatter(state.config.Labels)) + t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) - assert.Equal(t, []string{"LABEL type=web env=prod"}, state.commitMsg) - assert.True(t, reflect.DeepEqual(state.config.Labels, expectedLabels), "bad result labels") + assert.Equal(t, []string{"LABEL type=web env=prod"}, state.CommitMsg) + assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") } // =========== Testing CMD =========== @@ -315,7 +315,7 @@ func TestCommandCmd_Simple(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 
[]string{"/bin/sh", "-c", "apt-get install"}, state.config.Cmd) + assert.Equal(t, []string{"/bin/sh", "-c", "apt-get install"}, state.Config.Cmd) } func TestCommandCmd_Json(t *testing.T) { @@ -330,7 +330,7 @@ func TestCommandCmd_Json(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"apt-get", "install"}, state.config.Cmd) + assert.Equal(t, []string{"apt-get", "install"}, state.Config.Cmd) } // =========== Testing COPY =========== @@ -345,7 +345,7 @@ func TestCommandCopy_Simple(t *testing.T) { c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) // TODO: a better check - assert.True(t, len(arg.config.Cmd) > 0) + assert.True(t, len(arg.Config.Cmd) > 0) }).Once() c.On("UploadToContainer", "456", mock.AnythingOfType("*io.PipeReader"), "/").Return(nil).Once() @@ -358,7 +358,7 @@ func TestCommandCopy_Simple(t *testing.T) { t.Logf("state: %# v", pretty.Formatter(state)) c.AssertExpectations(t) - assert.Equal(t, "456", state.containerID) + assert.Equal(t, "456", state.ContainerID) } // TODO: test Cleanup diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index dbcffb7f..b91d957a 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -75,7 +75,7 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { // skip COPY if no files matched if len(u.files) == 0 { log.Infof("| No files matched") - s.skipCommit = true + s.SkipCommit() return s, nil } @@ -92,16 +92,16 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { // TODO: useful commit comment? message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest) - s.commitMsg = append(s.commitMsg, message) + s.Commit(message) - origCmd := s.config.Cmd - s.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + origCmd := s.Config.Cmd + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} - if s.containerID, err = b.client.CreateContainer(s); err != nil { + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } - s.config.Cmd = origCmd + s.Config.Cmd = origCmd // We need to make a new tar stream, because the previous one has been // read by the tarsum; maybe, optimize this in future @@ -111,7 +111,7 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { // Copy to "/" because we made the prefix inside the tar archive // Do that because we are not able to reliably create directories inside the container - if err = b.client.UploadToContainer(s.containerID, u.tar, "/"); err != nil { + if err = b.client.UploadToContainer(s.ContainerID, u.tar, "/"); err != nil { return s, err } From 372f11e04484ce7713e153e357d653178fe26f81 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:25:01 +0300 Subject: [PATCH 029/131] MAINTAINER impl --- src/rocker/build2/commands.go | 23 +++++++++++++++++++++++ src/rocker/build2/commands_test.go | 17 +++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 1b975696..2cff4b6b 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -53,6 +53,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { switch cfg.name { case "from": return &CommandFrom{cfg}, nil + case "maintainer": + return &CommandMaintainer{cfg}, nil case "run": return &CommandRun{cfg}, nil case "env": @@ -126,6 +128,27 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { return s, nil } +// 
CommandMaintainer implements CMD +type CommandMaintainer struct { + cfg ConfigCommand +} + +func (c *CommandMaintainer) String() string { + return c.cfg.original +} + +func (c *CommandMaintainer) Execute(b *Build) (s State, err error) { + s = b.state + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("MAINTAINER requires exactly one argument") + } + + // Don't see any sense of doing a commit here, as Docker does + s.SkipCommit() + + return s, nil +} + // CommandReset cleans the builder state before the next FROM type CommandCleanup struct { final bool diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 28332ddd..c0b72605 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -302,6 +302,23 @@ func TestCommandLabel_Advanced(t *testing.T) { assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") } +// =========== Testing MAINTAINER =========== + +func TestCommandMaintainer_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandMaintainer{ConfigCommand{ + args: []string{"terminator"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Len(t, state.CommitMsg, 1) + assert.Equal(t, COMMIT_SKIP, state.CommitMsg[0]) +} + // =========== Testing CMD =========== func TestCommandCmd_Simple(t *testing.T) { From ae88c2de8dc8fa02467a9604a0f0ca6af349c377 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:55:00 +0300 Subject: [PATCH 030/131] WORKDIR impl --- src/rocker/build2/commands.go | 34 ++++++++++++++++++++++ src/rocker/build2/commands_test.go | 46 ++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 2cff4b6b..d28636c3 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -18,6 +18,7 @@ package build2 import ( "fmt" + "path/filepath" "strings" log "github.com/Sirupsen/logrus" @@ -61,6 +62,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandEnv{cfg}, nil case "label": return &CommandLabel{cfg}, nil + case "workdir": + return &CommandWorkdir{cfg}, nil case "tag": return &CommandTag{cfg}, nil case "copy": @@ -366,6 +369,37 @@ func (c *CommandLabel) Execute(b *Build) (s State, err error) { return s, nil } +// CommandWorkdir implements WORKDIR +type CommandWorkdir struct { + cfg ConfigCommand +} + +func (c *CommandWorkdir) String() string { + return c.cfg.original +} + +func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("WORKDIR requires exactly one argument") + } + + workdir := c.cfg.args[0] + + if !filepath.IsAbs(workdir) { + current := s.Config.WorkingDir + workdir = filepath.Join("/", current, workdir) + } + + s.Config.WorkingDir = workdir + + s.Commit(fmt.Sprintf("WORKDIR %v", workdir)) + + return s, nil +} + // CommandCmd implements CMD type CommandCmd struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index c0b72605..aa5b91e2 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -319,6 +319,52 @@ func TestCommandMaintainer_Simple(t *testing.T) { assert.Equal(t, COMMIT_SKIP, state.CommitMsg[0]) } +// =========== Testing WORKDIR =========== + +func TestCommandWorkdir_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"/app"}, + 
}} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/app", state.Config.WorkingDir) +} + +func TestCommandWorkdir_Relative_HasRoot(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"www"}, + }} + + b.state.Config.WorkingDir = "/home" + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/home/www", state.Config.WorkingDir) +} + +func TestCommandWorkdir_Relative_NoRoot(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandWorkdir{ConfigCommand{ + args: []string{"www"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "/www", state.Config.WorkingDir) +} + // =========== Testing CMD =========== func TestCommandCmd_Simple(t *testing.T) { From 6a38588acc1f418fa88216c1fcc20b74e21236d2 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:55:06 +0300 Subject: [PATCH 031/131] fix comment --- src/rocker/build2/commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index d28636c3..c3192d69 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -325,7 +325,7 @@ func (c *CommandEnv) Execute(b *Build) (s State, err error) { return s, nil } -// CommandEnv implements LABEL +// CommandLabel implements LABEL type CommandLabel struct { cfg ConfigCommand } From 239cc5518b41e9d379a302a5e15fe7ad998db20c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 08:55:14 +0300 Subject: [PATCH 032/131] added TODO about ONBUILD --- src/rocker/build2/plan.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index 47b135d6..05f27fe1 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -42,6 +42,8 @@ func NewPlan(b *Build) (plan Plan, err error) { alwaysCommitAfter := "run attach add copy" neverCommitAfter := "from tag push" + // TODO: Process ONBUILD triggers if they exist + for i := 0; i < len(commands); i++ { cfg := commands[i] From 5642852e3c0d2a3ef4c6929dc386c4d07e747034 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:07:22 +0300 Subject: [PATCH 033/131] ENTRYPOINT impl --- src/rocker/build2/commands.go | 39 +++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 47 ++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index c3192d69..9c3b399f 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -72,6 +72,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandAdd{cfg}, nil case "cmd": return &CommandCmd{cfg}, nil + case "entrypoint": + return &CommandEntrypoint{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -430,6 +432,43 @@ func (c *CommandCmd) Execute(b *Build) (s State, err error) { return s, nil } +// CommandEntrypoint implements ENTRYPOINT +type CommandEntrypoint struct { + cfg ConfigCommand +} + +func (c *CommandEntrypoint) String() string { + return c.cfg.original +} + +func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { + s = b.state + + parsed := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + switch { + case c.cfg.attrs["json"]: + // ENTRYPOINT ["echo", "hi"] + s.Config.Entrypoint = parsed + case len(parsed) == 0: + // ENTRYPOINT [] + s.Config.Entrypoint = nil + default: + // ENTRYPOINT echo 
hi + s.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} + } + + s.Commit(fmt.Sprintf("ENTRYPOINT %q", s.Config.Entrypoint)) + + // TODO: when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + // if !b.cmdSet { + // b.Config.Cmd = nil + // } + + return s, nil +} + // CommandTag implements TAG type CommandTag struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index aa5b91e2..ed132002 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -396,6 +396,53 @@ func TestCommandCmd_Json(t *testing.T) { assert.Equal(t, []string{"apt-get", "install"}, state.Config.Cmd) } +// =========== Testing ENTRYPOINT =========== + +func TestCommandEntrypoint_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{"/bin/sh"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/sh", "-c", "/bin/sh"}, state.Config.Entrypoint) +} + +func TestCommandEntrypoint_Json(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{"/bin/bash", "-c"}, + attrs: map[string]bool{"json": true}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"/bin/bash", "-c"}, state.Config.Entrypoint) +} + +func TestCommandEntrypoint_Remove(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandEntrypoint{ConfigCommand{ + args: []string{}, + }} + + b.state.Config.Entrypoint = []string{"/bin/sh", "-c"} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string(nil), state.Config.Entrypoint) +} + // =========== Testing COPY =========== func TestCommandCopy_Simple(t *testing.T) { From b5ba9899043df0f490e6e4959658b9469ed38399 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:09:58 +0300 Subject: [PATCH 034/131] ENTRYPOINT overriding CMD --- src/rocker/build2/build.go | 1 + src/rocker/build2/commands.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 91d66018..4cc8570b 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -45,6 +45,7 @@ type State struct { ContainerID string CommitMsg []string ProducedImage bool + CmdSet bool } type Build struct { diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 9c3b399f..7da73d3d 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -424,10 +424,9 @@ func (c *CommandCmd) Execute(b *Build) (s State, err error) { s.Commit(fmt.Sprintf("CMD %q", cmd)) - // TODO: unsetting CMD? 
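
// Illustrative, standalone sketch (not part of the patch) of the CmdSet
// interplay added here: setting an ENTRYPOINT clears a CMD inherited from the
// base image unless the build set CMD explicitly. The trimmed-down struct and
// helper names below are made up for the example.
package main

import "fmt"

type buildState struct {
	Cmd        []string
	Entrypoint []string
	CmdSet     bool
}

func setCmd(s *buildState, cmd []string) {
	s.Cmd = cmd
	if len(cmd) != 0 {
		s.CmdSet = true
	}
}

func setEntrypoint(s *buildState, ep []string) {
	s.Entrypoint = ep
	if !s.CmdSet {
		// CMD came from the base image, not from this build; drop it so the
		// new entrypoint does not receive a stale argument list.
		s.Cmd = nil
	}
}

func main() {
	inherited := &buildState{Cmd: []string{"/bin/bash"}}
	setEntrypoint(inherited, []string{"/usr/bin/app"})
	fmt.Println(inherited.Cmd) // [] : the inherited CMD was discarded

	explicit := &buildState{}
	setCmd(explicit, []string{"--serve"})
	setEntrypoint(explicit, []string{"/usr/bin/app"})
	fmt.Println(explicit.Cmd) // [--serve] : the explicit CMD survives
}
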
- // if len(args) != 0 { - // b.cmdSet = true - // } + if len(c.cfg.args) != 0 { + s.CmdSet = true + } return s, nil } @@ -460,11 +459,12 @@ func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { s.Commit(fmt.Sprintf("ENTRYPOINT %q", s.Config.Entrypoint)) - // TODO: when setting the entrypoint if a CMD was not explicitly set then + // TODO: test this + // when setting the entrypoint if a CMD was not explicitly set then // set the command to nil - // if !b.cmdSet { - // b.Config.Cmd = nil - // } + if !s.CmdSet { + s.Config.Cmd = nil + } return s, nil } From 06f6ac0cfba3ea0e712c2bea05771059254d6a66 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:24:29 +0300 Subject: [PATCH 035/131] vendor docker/pkg/nat and docker/pkg/parsers --- vendor/manifest | 14 + .../github.com/docker/docker/pkg/nat/nat.go | 227 ++++++++ .../docker/docker/pkg/nat/nat_test.go | 525 ++++++++++++++++++ .../github.com/docker/docker/pkg/nat/sort.go | 98 ++++ .../docker/docker/pkg/nat/sort_test.go | 85 +++ .../docker/pkg/parsers/filters/parse.go | 134 +++++ .../docker/pkg/parsers/filters/parse_test.go | 218 ++++++++ .../docker/pkg/parsers/kernel/kernel.go | 100 ++++ .../docker/pkg/parsers/kernel/kernel_test.go | 92 +++ .../pkg/parsers/kernel/kernel_windows.go | 67 +++ .../docker/pkg/parsers/kernel/uname_linux.go | 19 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem_freebsd.go | 18 + .../operatingsystem/operatingsystem_linux.go | 44 ++ .../operatingsystem/operatingsystem_test.go | 124 +++++ .../operatingsystem_windows.go | 49 ++ .../docker/docker/pkg/parsers/parsers.go | 198 +++++++ .../docker/docker/pkg/parsers/parsers_test.go | 240 ++++++++ 18 files changed, 2270 insertions(+) create mode 100644 vendor/src/github.com/docker/docker/pkg/nat/nat.go create mode 100644 vendor/src/github.com/docker/docker/pkg/nat/nat_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/nat/sort.go create mode 100644 vendor/src/github.com/docker/docker/pkg/nat/sort_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/parsers.go create mode 100644 vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go diff --git a/vendor/manifest b/vendor/manifest index 33da011c..680dfcee 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -122,6 +122,20 @@ "revision": "148be8bd7efd2cdb74b0cd9466fccb57c4c51834", "branch": "master", "path": "/pkg/tarsum" + }, + { + "importpath": "github.com/docker/docker/pkg/nat", + 
"repository": "https://github.com/docker/docker", + "revision": "b0dc11127ef4fc20261ccc0db03a16b17f7f91c4", + "branch": "master", + "path": "/pkg/nat" + }, + { + "importpath": "github.com/docker/docker/pkg/parsers", + "repository": "https://github.com/docker/docker", + "revision": "b0dc11127ef4fc20261ccc0db03a16b17f7f91c4", + "branch": "master", + "path": "/pkg/parsers" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/nat/nat.go b/vendor/src/github.com/docker/docker/pkg/nat/nat.go new file mode 100644 index 00000000..6595feb0 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/nat.go @@ -0,0 +1,227 @@ +package nat + +// nat is a convenience package for docker's manipulation of strings describing +// network ports. + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/docker/docker/pkg/parsers" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRange(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRange parses the port range string and returns start/end ints +func ParsePortRange(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := parsers.ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + if len(portStr) == 0 { + return 0 + } + + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := strconv.ParseUint(portStr, 10, 16) + return int(port) +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRange(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, 
"/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + + for _, rawPort := range ports { + proto := "tcp" + + if i := strings.LastIndex(rawPort, "/"); i != -1 { + proto = rawPort[i+1:] + rawPort = rawPort[:i] + } + if !strings.Contains(rawPort, ":") { + rawPort = fmt.Sprintf("::%s", rawPort) + } else if len(strings.Split(rawPort, ":")) == 2 { + rawPort = fmt.Sprintf(":%s", rawPort) + } + + parts, err := parsers.PartParser(portSpecTemplate, rawPort) + if err != nil { + return nil, nil, err + } + + var ( + containerPort = parts["containerPort"] + rawIP = parts["ip"] + hostPort = parts["hostPort"] + ) + + if rawIP != "" && net.ParseIP(rawIP) == nil { + return nil, nil, fmt.Errorf("Invalid ip address: %s", rawIP) + } + if containerPort == "" { + return nil, nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := parsers.ParsePortRange(containerPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = parsers.ParsePortRange(hostPort) + if err != nil { + return nil, nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, nil, fmt.Errorf("Invalid proto: %s", proto) + } + + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. 
+ if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, nil, err + } + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + + binding := PortBinding{ + HostIP: rawIP, + HostPort: hostPort, + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, binding) + } + } + return exposedPorts, bindings, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go b/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go new file mode 100644 index 00000000..2c71142b --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/nat_test.go @@ -0,0 +1,525 @@ +package nat + +import ( + "testing" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. + // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestParsePortRange(t *testing.T) { + var ( + begin int + end int + err error + ) + + type TestRange struct { + Range string + Begin int + End int + } + validRanges := []TestRange{ + {"1234", 1234, 1234}, + {"1234-1234", 1234, 1234}, + {"1234-1235", 1234, 1235}, + {"8000-9000", 8000, 9000}, + {"0", 0, 0}, + {"0-0", 0, 0}, + } + + for _, r := range validRanges { + begin, end, err = ParsePortRange(r.Range) + + if err != nil || begin != r.Begin { + t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) + } + if err != nil || end != r.End { + t.Fatalf("Parsing port range '%s' did not succeed. 
Expected end %d, got %d", r.Range, r.End, end) + } + } + + invalidRanges := []string{ + "asdf", + "1asdf", + "9000-8000", + "9000-", + "-8000", + "-8000-", + } + + for _, r := range invalidRanges { + begin, end, err = ParsePortRange(r) + + if err == nil || begin != 0 || end != 0 { + t.Fatalf("Parsing port range '%s' succeeded", r) + } + } +} + +func TestPort(t *testing.T) { + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1230") + if err == nil { + t.Fatal("tcp, 1234-1230 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1242") + if err != nil { + t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) + } + + if string(p) != "1234-1242/tcp" { + t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results", proto, port) + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", 
portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParsePortSpecsWithRange(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { + t.Fatalf("Expect single binding to port %s but found %s", port, bindings) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } 
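
// Illustrative sketch (not part of the patch) of the Port helpers the tests
// above exercise: NewPort normalizes a port-range spec and the accessor
// methods read it back. The range used here is an example value.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/nat"
)

func main() {
	p, err := nat.NewPort("tcp", "8000-9000")
	if err != nil {
		log.Fatal(err)
	}

	start, end, err := p.Range()
	if err != nil {
		log.Fatal(err)
	}

	// Prints: 8000-9000/tcp tcp 8000-9000 8000 9000
	fmt.Println(p, p.Proto(), p.Port(), start, end)
}
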
+} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %d", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %d", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/sort.go b/vendor/src/github.com/docker/docker/pkg/nat/sort.go new file mode 100644 index 00000000..1eb0fedd --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/sort.go @@ -0,0 +1,98 @@ +package nat + +import ( + "sort" + 
"strings" + + "github.com/docker/docker/pkg/parsers" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. +func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := parsers.ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go b/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go new file mode 100644 index 00000000..88ed9111 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/nat/sort_test.go @@ -0,0 +1,85 @@ +package nat + +import ( + "fmt" + "reflect" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} + +func TestSortPortMap(t *testing.T) { + ports := []Port{ + Port("22/tcp"), + Port("22/udp"), + Port("8000/tcp"), + Port("6379/tcp"), + Port("9999/tcp"), + } + + portMap := PortMap{ + Port("22/tcp"): []PortBinding{ + {}, + }, + Port("8000/tcp"): []PortBinding{ + {}, + }, + Port("6379/tcp"): 
[]PortBinding{ + {}, + {HostIP: "0.0.0.0", HostPort: "32749"}, + }, + Port("9999/tcp"): []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "40000"}, + }, + } + + SortPortMap(ports, portMap) + if !reflect.DeepEqual(ports, []Port{ + Port("9999/tcp"), + Port("6379/tcp"), + Port("8000/tcp"), + Port("22/tcp"), + Port("22/udp"), + }) { + t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) + } + if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "32749"}, + {}, + }) { + t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go new file mode 100644 index 00000000..6c394f16 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse.go @@ -0,0 +1,134 @@ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "regexp" + "strings" +) + +// Args stores filter arguments as map key:{array of values}. +// It contains a aggregation of the list of arguments (which are in the form +// of -f 'key=value') based on the key, and store values for the same key +// in an slice. +// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {'label': {'label1=1','label2=2'}, 'image.name', {'ubuntu'}} +type Args map[string][]string + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. +func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if prev == nil { + filters = Args{} + } + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + filters[name] = append(filters[name], value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into an string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if len(a) == 0 { + return "", nil + } + + buf, err := json.Marshal(a) + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + args := Args{} + if len(p) == 0 { + return args, nil + } + if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil { + return nil, err + } + return args, nil +} + +// MatchKVList returns true if the values for the specified field maches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label':{'label1=1','label2=2','label3=3'}} +// it returns true. 
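+// In Go terms, an illustrative call would be:
+//   args := Args{"label": {"key1=1"}}
+//   args.MatchKVList("label", map[string]string{"key1": "1", "key2": "2"}) // true
+//   args.MatchKVList("label", map[string]string{"key1": "2"})              // false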
+func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if sources == nil || len(sources) == 0 { + return false + } + +outer: + for _, name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + for k, v := range sources { + if len(testKV) == 1 { + if k == testKV[0] { + continue outer + } + } else if k == testKV[0] && v == testKV[1] { + continue outer + } + } + + return false + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + fieldValues := filters[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + for _, name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go new file mode 100644 index 00000000..eb9fcef9 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/filters/parse_test.go @@ -0,0 +1,218 @@ +package filters + +import ( + "sort" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = Args{} + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args["created"]) != 1 { + t.Errorf("failed to set this arg") + } + if len(args["image.name"]) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args == nil || len(args) != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + a := Args{ + "created": []string{"today"}, + "image.name": []string{"ubuntu*", "*untu"}, + } + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valids := map[string]Args{ + `{"key": ["value"]}`: { + "key": {"value"}, + }, + `{"key": ["value1", "value2"]}`: { + "key": {"value1", "value2"}, + }, + `{"key1": ["value1"], "key2": ["value2"]}`: { + "key1": {"value1"}, + "key2": {"value2"}, + }, + } + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + for json, expectedArgs := range valids { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if len(args) != len(expectedArgs) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } 
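+		// compare the parsed values per key; both slices are sorted first so the
+		// comparison does not depend on value ordering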
+ for key, expectedValues := range expectedArgs { + values := args[key] + sort.Strings(values) + sort.Strings(expectedValues) + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for index, expectedValue := range expectedValues { + if values[index] != expectedValue { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if len(a) != len(v1) { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVList(t *testing.T) { + // empty sources + args := Args{ + "created": []string{"today"}, + } + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value1"}, + }: "labels", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"today"}, + }: "created", + &Args{ + "created": []string{"today"}, + "labels": []string{"key4"}, + }: "labels", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1=value3"}, + }: "labels", + } + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + matches := map[*Args]string{ + &Args{}: "field", + &Args{ + "created": []string{"today"}, + "labels": []string{"key1"}, + }: "today", + &Args{ + "created": []string{"to*"}, + }: "created", + &Args{ + "created": []string{"to(.*)"}, + }: "created", + &Args{ + "created": []string{"tod"}, + }: "created", + &Args{ + "created": []string{"anything", "to*"}, + }: "created", + } + differs := map[*Args]string{ + &Args{ + "created": []string{"tomorrow"}, + }: "created", + &Args{ + "created": []string{"to(day"}, + }: "created", + &Args{ + "created": []string{"tom(.*)"}, + }: "created", + &Args{ + "created": []string{"today1"}, + "labels": []string{"today"}, + }: "created", + } + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..a21ba137 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,100 @@ +// +build !windows + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 
4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 it a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + var ( + err error + ) + + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go new file mode 100644 index 00000000..6a2c2468 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_test.go @@ -0,0 +1,92 @@ +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor) + } +} + +func TestParseRelease(t *testing.T) { + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0) + assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0) + assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0) + assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0) + assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1) + // Errors + invalids := []string{ + "3", + "a", + "a.a", + "a.a.a-a", + } + for _, invalid := range invalids { + expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid) + if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage { + + } + } +} + +func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) { + if r := CompareKernelVersion(a, b); r != result { + t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 00000000..85ca250c --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,67 @@ +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. 
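+// On Windows it reads the BuildLabEx registry value and decodes the packed
+// DWORD returned by syscall.GetVersion() into major/minor/build.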
+func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..7d12fcbd --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthgrouh for syscall.Utsname in order to make it portable with +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..79c66b32 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go new file mode 100644 index 00000000..0589cf2a --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go @@ -0,0 +1,18 @@ +package operatingsystem + +import ( + "errors" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + // TODO: Implement OS detection + return "", errors.New("Cannot detect OS version") +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD, always returns false. 
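+// It currently also returns an error, since jail detection is not implemented.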
+func IsContainerized() (bool, error) { + // TODO: Implement jail detection + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..ca8ea8f0 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,44 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("PRETTY_NAME")); i >= 0 { + b = b[i+13:] + return string(b[:bytes.IndexByte(b, '"')]), nil + } + return "", errors.New("PRETTY_NAME not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go new file mode 100644 index 00000000..b7d54cbb --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_test.go @@ -0,0 +1,124 @@ +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var ( + backup = etcOsRelease + ubuntuTrusty = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + gentoo = []byte(`NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`) + noPrettyName = []byte(`NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`) + ) + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for expect, osRelease := range map[string][]byte{ + "Ubuntu 14.04 LTS": ubuntuTrusty, + "Gentoo/Linux": gentoo, + "": noPrettyName, + } { + if err := ioutil.WriteFile(etcOsRelease, osRelease, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if s != expect { + if expect == "" { + t.Fatalf("Expected error 'PRETTY_NAME not found', but got %v", err) + } else { + t.Fatalf("Expected '%s', but got '%s'. 
Err=%v", expect, s, err) + } + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 00000000..3c86b6af --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. 
+func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 00000000..30b19329 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,198 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "net/url" + "path" + "runtime" + "strconv" + "strings" +) + +// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr +// defaultUnixAddr must be a absolute file path (no `unix://` prefix) +// defaultTCPAddr must be the full `tcp://host:port` form +func ParseDockerDaemonHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if runtime.GOOS != "windows" { + return fmt.Sprintf("unix://%s", defaultUnixAddr), nil + } + return defaultTCPAddr, nil + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return ParseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// ParseUnixAddr parses and validates that the specified address is a valid UNIX +// socket address. It returns a formatted UNIX socket address, either using the +// address parsed from addr, or the contents of defaultAddr if addr is a blank +// string. +func ParseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. 
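+// An illustrative call: ParseTCPAddr(":2375", "tcp://127.0.0.1:2376")
+// returns "tcp://127.0.0.1:2375".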
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + hostParts := strings.Split(u.Host, ":") + if len(hostParts) != 2 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + defaults := strings.Split(defaultAddr, ":") + if len(defaults) != 3 { + return "", fmt.Errorf("Invalid defaults address format: %s", defaultAddr) + } + + host := hostParts[0] + if host == "" { + host = strings.TrimPrefix(defaults[1], "//") + } + if hostParts[1] == "" { + hostParts[1] = defaults[2] + } + p, err := strconv.Atoi(hostParts[1]) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil +} + +// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest +// The tag can be confusing because of a port in a repository name. +// Ex: localhost.localdomain:5000/samalba/hipache:latest +// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb +func ParseRepositoryTag(repos string) (string, string) { + n := strings.Index(repos, "@") + if n >= 0 { + parts := strings.Split(repos, "@") + return parts[0], parts[1] + } + n = strings.LastIndex(repos, ":") + if n < 0 { + return repos, "" + } + if tag := repos[n+1:]; !strings.Contains(tag, "/") { + return repos[:n], tag + } + return repos, "" +} + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get an HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 00000000..d83722e8 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,240 @@ +package parsers + +import ( + "runtime" + "strings" + "testing" +) + +func TestParseDockerDaemonHost(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + defaultUnix = "/var/run/docker.sock" + defaultHOST = "unix:///var/run/docker.sock" + ) + if runtime.GOOS == "windows" { + defaultHOST = defaultHTTPHost + } + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid bind address format: unix", + "tcp": "Invalid bind address format: tcp", + "unix": "Invalid bind address format: unix", + "fd": "Invalid bind address format: fd", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "": defaultHOST, + " ": defaultHOST, + " ": defaultHOST, + "tcp://": defaultHTTPHost, + "tcp://:7777": "tcp://127.0.0.1:7777", + 
"tcp://:7777/path": "tcp://127.0.0.1:7777/path", + " tcp://:7777/path ": "tcp://127.0.0.1:7777/path", + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix:///var/run/docker.sock", + "fd://": "fd://", + "fd://something": "fd://something", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseDockerDaemonHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "0.0.0.0": "Invalid bind address format: 0.0.0.0", + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := ParseUnixAddr("tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := ParseUnixAddr("", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} + +func TestParseRepositoryTag(t *testing.T) { + if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and 
'%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } + if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) + } + if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { + t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) + } + if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { + t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) + } +} + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": {"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." 
{ + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} From 1dfa7167955154e93cd8fc674c1a3620ae338e4e Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:24:36 +0300 Subject: [PATCH 036/131] EXPOSE impl --- src/rocker/build2/commands.go | 52 ++++++++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 43 ++++++++++++++++++++++++ 2 files changed, 95 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 7da73d3d..63ad46c1 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -19,9 +19,11 @@ package build2 import ( "fmt" "path/filepath" + "sort" "strings" log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/nat" 
"github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" ) @@ -74,6 +76,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandCmd{cfg}, nil case "entrypoint": return &CommandEntrypoint{cfg}, nil + case "expose": + return &CommandExpose{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -469,6 +473,54 @@ func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { return s, nil } +// CommandExpose implements EXPOSE +type CommandExpose struct { + cfg ConfigCommand +} + +func (c *CommandExpose) String() string { + return c.cfg.original +} + +func (c *CommandExpose) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("EXPOSE requires at least one argument") + } + + if s.Config.ExposedPorts == nil { + s.Config.ExposedPorts = map[docker.Port]struct{}{} + } + + ports, _, err := nat.ParsePortSpecs(c.cfg.args) + if err != nil { + return s, err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + dockerPort := docker.Port(port) + if _, exists := s.Config.ExposedPorts[dockerPort]; !exists { + s.Config.ExposedPorts[dockerPort] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + // return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) + + message := fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")) + s.Commit(message) + + return s, nil +} + // CommandTag implements TAG type CommandTag struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index ed132002..93a6c220 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -443,6 +443,49 @@ func TestCommandEntrypoint_Remove(t *testing.T) { assert.Equal(t, []string(nil), state.Config.Entrypoint) } +// =========== Testing EXPOSE =========== + +func TestCommandExpose_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandExpose{ConfigCommand{ + args: []string{"80"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedPorts := map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + } + + assert.True(t, reflect.DeepEqual(expectedPorts, state.Config.ExposedPorts), "bad exposed ports") +} + +func TestCommandExpose_Add(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandExpose{ConfigCommand{ + args: []string{"443"}, + }} + + b.state.Config.ExposedPorts = map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + expectedPorts := map[docker.Port]struct{}{ + docker.Port("80/tcp"): struct{}{}, + docker.Port("443/tcp"): struct{}{}, + } + + assert.True(t, reflect.DeepEqual(expectedPorts, state.Config.ExposedPorts), "bad exposed ports") +} + // =========== Testing COPY =========== func TestCommandCopy_Simple(t *testing.T) { From b108759e47bbe3a3f2425186204ac3347ef334ae Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:32:42 +0300 Subject: [PATCH 037/131] VOLUME impl --- src/rocker/build2/commands.go | 35 ++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 43 ++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/src/rocker/build2/commands.go 
b/src/rocker/build2/commands.go index 63ad46c1..ba0716f2 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -78,6 +78,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandEntrypoint{cfg}, nil case "expose": return &CommandExpose{cfg}, nil + case "volume": + return &CommandVolume{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -521,6 +523,39 @@ func (c *CommandExpose) Execute(b *Build) (s State, err error) { return s, nil } +// CommandVolume implements VOLUME +type CommandVolume struct { + cfg ConfigCommand +} + +func (c *CommandVolume) String() string { + return c.cfg.original +} + +func (c *CommandVolume) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("VOLUME requires at least one argument") + } + + if s.Config.Volumes == nil { + s.Config.Volumes = map[string]struct{}{} + } + for _, v := range c.cfg.args { + v = strings.TrimSpace(v) + if v == "" { + return s, fmt.Errorf("Volume specified can not be an empty string") + } + s.Config.Volumes[v] = struct{}{} + } + + s.Commit(fmt.Sprintf("VOLUME %v", c.cfg.args)) + + return s, nil +} + // CommandTag implements TAG type CommandTag struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 93a6c220..e83f1d9f 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -486,6 +486,49 @@ func TestCommandExpose_Add(t *testing.T) { assert.True(t, reflect.DeepEqual(expectedPorts, state.Config.ExposedPorts), "bad exposed ports") } +// =========== Testing VOLUME =========== + +func TestCommandVolume_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandVolume{ConfigCommand{ + args: []string{"/data"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + volumes := map[string]struct{}{ + "/data": struct{}{}, + } + + assert.True(t, reflect.DeepEqual(volumes, state.Config.Volumes), "bad volumes") +} + +func TestCommandVolume_Add(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandVolume{ConfigCommand{ + args: []string{"/var/log"}, + }} + + b.state.Config.Volumes = map[string]struct{}{ + "/data": struct{}{}, + } + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + volumes := map[string]struct{}{ + "/data": struct{}{}, + "/var/log": struct{}{}, + } + + assert.True(t, reflect.DeepEqual(volumes, state.Config.Volumes), "bad volumes") +} + // =========== Testing COPY =========== func TestCommandCopy_Simple(t *testing.T) { From 99e7e987ab5e12bdaaad0e40892abbc22272770f Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:32:47 +0300 Subject: [PATCH 038/131] cleanup --- src/rocker/build2/commands.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index ba0716f2..5e2e9a54 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -515,7 +515,6 @@ func (c *CommandExpose) Execute(b *Build) (s State, err error) { i++ } sort.Strings(portList) - // return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) message := fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")) s.Commit(message) From d861e400a460bdb6a6fecb4bfcbf24018b42e358 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:32:56 +0300 Subject: [PATCH 039/131] add TODO to update parser --- src/rocker/build2/rockerfile.go | 2 ++ 1 file changed, 2 
insertions(+) diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go index 2b56a389..433ee241 100644 --- a/src/rocker/build2/rockerfile.go +++ b/src/rocker/build2/rockerfile.go @@ -71,6 +71,8 @@ func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template. r.Content = content.String() + // TODO: update parser from Docker + if r.rootNode, err = parser.Parse(content); err != nil { return nil, err } From b294833e547daf563a1b77d9afd2078e744ced5d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:36:26 +0300 Subject: [PATCH 040/131] USER impl --- src/rocker/build2/commands.go | 26 ++++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 16 ++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 5e2e9a54..e3529c00 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -80,6 +80,8 @@ func NewCommand(cfg ConfigCommand) (Command, error) { return &CommandExpose{cfg}, nil case "volume": return &CommandVolume{cfg}, nil + case "user": + return &CommandUser{cfg}, nil } return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -555,6 +557,30 @@ func (c *CommandVolume) Execute(b *Build) (s State, err error) { return s, nil } +// CommandUser implements USER +type CommandUser struct { + cfg ConfigCommand +} + +func (c *CommandUser) String() string { + return c.cfg.original +} + +func (c *CommandUser) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("USER requires exactly one argument") + } + + s.Config.User = c.cfg.args[0] + + s.Commit(fmt.Sprintf("USER %v", c.cfg.args)) + + return s, nil +} + // CommandTag implements TAG type CommandTag struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index e83f1d9f..9039abd8 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -529,6 +529,22 @@ func TestCommandVolume_Add(t *testing.T) { assert.True(t, reflect.DeepEqual(volumes, state.Config.Volumes), "bad volumes") } +// =========== Testing USER =========== + +func TestCommandUser_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandUser{ConfigCommand{ + args: []string{"www"}, + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "www", state.Config.User) +} + // =========== Testing COPY =========== func TestCommandCopy_Simple(t *testing.T) { From 84276cd608fdac821581bff8db39f3a5a94eb0cb Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 09:37:11 +0300 Subject: [PATCH 041/131] add TODO --- src/rocker/build2/client.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index f519730d..0e9ae2f5 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -220,6 +220,8 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error success <- <-success + // TODO: support options for container resources constraints like `docker build` has + if err := c.client.StartContainer(containerID, &docker.HostConfig{}); err != nil { return err } From 3daab20f282b7b2c9a00fe92f9fc2d306f2095f2 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 10:14:12 +0300 Subject: [PATCH 042/131] ONBUILD almost done --- src/rocker/build2/build.go | 22 +++++++--- src/rocker/build2/commands.go | 32 +++++++++++++- 
src/rocker/build2/rockerfile.go | 64 +++++++++++++++++++++------- src/rocker/build2/rockerfile_test.go | 16 +++++++ 4 files changed, 109 insertions(+), 25 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 4cc8570b..760feaf2 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -40,12 +40,13 @@ type Config struct { } type State struct { - Config docker.Config - ImageID string - ContainerID string - CommitMsg []string - ProducedImage bool - CmdSet bool + Config docker.Config + ImageID string + ContainerID string + CommitMsg []string + ProducedImage bool + CmdSet bool + InjectCommands []Command } type Build struct { @@ -66,7 +67,8 @@ func New(client Client, rockerfile *Rockerfile, cfg Config) *Build { func (b *Build) Run(plan Plan) (err error) { - for k, c := range plan { + for k := 0; k < len(plan); k++ { + c := plan[k] log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c)) @@ -77,6 +79,12 @@ func (b *Build) Run(plan Plan) (err error) { } log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state)) + + if len(b.state.InjectCommands) > 0 { + tail := append(b.state.InjectCommands, plan[k+1:]...) + plan = append(plan[:k+1], tail...) + b.state.InjectCommands = []Command{} + } } return nil diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index e3529c00..45a61f33 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -52,7 +52,7 @@ type Command interface { } func NewCommand(cfg ConfigCommand) (Command, error) { - // TODO: use reflection + // TODO: use reflection? switch cfg.name { case "from": return &CommandFrom{cfg}, nil @@ -138,6 +138,21 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { s.ImageID = img.ID s.Config = *img.Config + // If we don't have OnBuild triggers, then we are done + if len(s.Config.OnBuild) == 0 { + return s, nil + } + + log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild)) + + // parse the ONBUILD triggers by invoking the parser + if s.InjectCommands, err = parseOnbuildCommands(s.Config.OnBuild); err != nil { + return s, err + } + + // Remove them from the config, since the config will be committed. 
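+	// (otherwise the same triggers would fire again for any image built FROM the result)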
+ s.Config.OnBuild = []string{} + return s, nil } @@ -610,7 +625,7 @@ func (c *CommandCopy) Execute(b *Build) (State, error) { return copyFiles(b, c.cfg.args, "COPY") } -// CommandCopy implements ADD +// CommandAdd implements ADD // For now it is an alias of COPY, but later will add urls and archives to it type CommandAdd struct { cfg ConfigCommand @@ -626,3 +641,16 @@ func (c *CommandAdd) Execute(b *Build) (State, error) { } return copyFiles(b, c.cfg.args, "ADD") } + +// CommandOnbuildWrap wraps ONBUILD command +type CommandOnbuildWrap struct { + cmd Command +} + +func (c *CommandOnbuildWrap) String() string { + return "ONBUILD " + c.cmd.String() +} + +func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { + return c.cmd.Execute(b) +} diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go index 433ee241..d9e8d1c2 100644 --- a/src/rocker/build2/rockerfile.go +++ b/src/rocker/build2/rockerfile.go @@ -84,22 +84,7 @@ func (r *Rockerfile) Commands() []ConfigCommand { commands := []ConfigCommand{} for i := 0; i < len(r.rootNode.Children); i++ { - node := r.rootNode.Children[i] - - cfg := ConfigCommand{ - name: node.Value, - attrs: node.Attributes, - original: node.Original, - args: []string{}, - flags: parseFlags(node.Flags), - } - - // fill in args and substitute vars - for n := node.Next; n != nil; n = n.Next { - cfg.args = append(cfg.args, n.Value) - } - - commands = append(commands, cfg) + commands = append(commands, parseCommand(r.rootNode.Children[i])) } return commands @@ -118,6 +103,53 @@ func handleJSONArgs(args []string, attributes map[string]bool) []string { return []string{strings.Join(args, " ")} } +func parseCommand(node *parser.Node) ConfigCommand { + cfg := ConfigCommand{ + name: node.Value, + attrs: node.Attributes, + original: node.Original, + args: []string{}, + flags: parseFlags(node.Flags), + } + + // fill in args and substitute vars + for n := node.Next; n != nil; n = n.Next { + cfg.args = append(cfg.args, n.Value) + } + + return cfg +} + +func parseOnbuildCommands(onBuildTriggers []string) ([]Command, error) { + commands := []Command{} + + for _, step := range onBuildTriggers { + + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return commands, err + } + + for _, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return commands, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return commands, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + cmd, err := NewCommand(parseCommand(n)) + if err != nil { + return commands, err + } + + commands = append(commands, &CommandOnbuildWrap{cmd}) + } + } + + return commands, nil +} + func parseFlags(flags []string) map[string]string { result := make(map[string]string) for _, flag := range flags { diff --git a/src/rocker/build2/rockerfile_test.go b/src/rocker/build2/rockerfile_test.go index 16ec9be6..be7ac077 100644 --- a/src/rocker/build2/rockerfile_test.go +++ b/src/rocker/build2/rockerfile_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/kr/pretty" "github.com/stretchr/testify/assert" ) @@ -57,3 +58,18 @@ func TestRockerfileCommands(t *testing.T) { assert.Equal(t, "from", commands[0].name) assert.Equal(t, "ubuntu", commands[0].args[0]) } + +func TestRockerfileParseOnbuildCommands(t *testing.T) { + triggers := []string{ + "RUN make", + "RUN make install", + } + + commands, err := parseOnbuildCommands(triggers) + if err != nil { + t.Fatal(err) + } + + // TODO: 
asserts + pretty.Println(commands) +} From e62d52aeb6cbcf89791abbfeb2b65574f64283a1 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 10:14:53 +0300 Subject: [PATCH 043/131] COPY add TODO --- src/rocker/build2/copy_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build2/copy_test.go index 98216b73..04653922 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build2/copy_test.go @@ -164,6 +164,8 @@ func TestListFiles_Dir_AndFiles(t *testing.T) { } } +// TODO: COPY . /go/src/app + func TestListFiles_Dir_Multi(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", From bda89786256250a656f23696cfaefa6f07f7b2c4 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 14:15:50 +0300 Subject: [PATCH 044/131] ONBUILD done --- src/cmd/rocker/main.go | 2 +- src/rocker/build2/build.go | 20 ++++++++++++++++---- src/rocker/build2/commands.go | 27 +++++++++++---------------- src/rocker/build2/commands_test.go | 3 +-- src/rocker/build2/plan.go | 9 +++------ src/rocker/build2/plan_test.go | 2 +- src/rocker/build2/rockerfile.go | 26 +++++++++++--------------- src/rocker/build2/rockerfile_test.go | 8 +++++--- 8 files changed, 49 insertions(+), 48 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index adf4111b..232448f5 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -268,7 +268,7 @@ func buildCommand(c *cli.Context) { NoGarbage: c.Bool("no-garbage"), }) - plan, err := build2.NewPlan(builder) + plan, err := build2.NewPlan(rockerfile.Commands(), true) if err != nil { log.Fatal(err) } diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 760feaf2..97bf67b6 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -46,7 +46,7 @@ type State struct { CommitMsg []string ProducedImage bool CmdSet bool - InjectCommands []Command + InjectCommands []string } type Build struct { @@ -72,7 +72,6 @@ func (b *Build) Run(plan Plan) (err error) { log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c)) - // log.Infof("%s", color.New(color.FgBlue).SprintFunc()(c)) if b.state, err = c.Execute(b); err != nil { return err @@ -80,10 +79,23 @@ func (b *Build) Run(plan Plan) (err error) { log.Debugf("State after step %d: %# v", k+1, pretty.Formatter(b.state)) + // Here we need to inject ONBUILD commands on the fly, + // build sub plan and merge it with the main plan. + // Not very beautiful, because Run uses Plan as the argument + // and then it builds its own. But. if len(b.state.InjectCommands) > 0 { - tail := append(b.state.InjectCommands, plan[k+1:]...) + commands, err := parseOnbuildCommands(b.state.InjectCommands) + if err != nil { + return err + } + subPlan, err := NewPlan(commands, false) + if err != nil { + return err + } + tail := append(subPlan, plan[k+1:]...) plan = append(plan[:k+1], tail...) 
- b.state.InjectCommands = []Command{} + + b.state.InjectCommands = []string{} } } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 45a61f33..96d78686 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -33,11 +33,12 @@ const ( ) type ConfigCommand struct { - name string - args []string - attrs map[string]bool - flags map[string]string - original string + name string + args []string + attrs map[string]bool + flags map[string]string + original string + isOnbuild bool } type Command interface { @@ -145,12 +146,8 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild)) - // parse the ONBUILD triggers by invoking the parser - if s.InjectCommands, err = parseOnbuildCommands(s.Config.OnBuild); err != nil { - return s, err - } - // Remove them from the config, since the config will be committed. + s.InjectCommands = s.Config.OnBuild s.Config.OnBuild = []string{} return s, nil @@ -165,16 +162,14 @@ func (c *CommandMaintainer) String() string { return c.cfg.original } -func (c *CommandMaintainer) Execute(b *Build) (s State, err error) { - s = b.state +func (c *CommandMaintainer) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { - return s, fmt.Errorf("MAINTAINER requires exactly one argument") + return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") } // Don't see any sense of doing a commit here, as Docker does - s.SkipCommit() - return s, nil + return b.state, nil } // CommandReset cleans the builder state before the next FROM @@ -225,7 +220,7 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { // Reset collected commit messages after the commit s.CommitMsg = []string{} - if len(commits) == 0 { + if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") return s, nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 9039abd8..e06a3519 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -315,8 +315,7 @@ func TestCommandMaintainer_Simple(t *testing.T) { t.Fatal(err) } - assert.Len(t, state.CommitMsg, 1) - assert.Equal(t, COMMIT_SKIP, state.CommitMsg[0]) + assert.Len(t, state.CommitMsg, 0) } // =========== Testing WORKDIR =========== diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index 05f27fe1..d9dd9524 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -20,10 +20,9 @@ import "strings" type Plan []Command -func NewPlan(b *Build) (plan Plan, err error) { +func NewPlan(commands []ConfigCommand, finalCleanup bool) (plan Plan, err error) { plan = Plan{} - commands := b.rockerfile.Commands() committed := true commit := func() { @@ -40,9 +39,7 @@ func NewPlan(b *Build) (plan Plan, err error) { alwaysCommitBefore := "run attach add copy tag push" alwaysCommitAfter := "run attach add copy" - neverCommitAfter := "from tag push" - - // TODO: Process ONBUILD triggers if they exist + neverCommitAfter := "from maintainer tag push" for i := 0; i < len(commands); i++ { cfg := commands[i] @@ -87,7 +84,7 @@ func NewPlan(b *Build) (plan Plan, err error) { } // Always cleanup at the end - if i == len(commands)-1 { + if i == len(commands)-1 && finalCleanup { cleanup(i) } } diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build2/plan_test.go index 460164b0..e2608f95 100644 --- a/src/rocker/build2/plan_test.go +++ b/src/rocker/build2/plan_test.go @@ -335,7 +335,7 @@ FROM alpine func makePlan(t *testing.T, 
rockerfileContent string) Plan { b, _ := makeBuild(t, rockerfileContent, Config{}) - p, err := NewPlan(b) + p, err := NewPlan(b.rockerfile.Commands(), true) if err != nil { t.Fatal(err) } diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go index d9e8d1c2..041d1680 100644 --- a/src/rocker/build2/rockerfile.go +++ b/src/rocker/build2/rockerfile.go @@ -84,7 +84,7 @@ func (r *Rockerfile) Commands() []ConfigCommand { commands := []ConfigCommand{} for i := 0; i < len(r.rootNode.Children); i++ { - commands = append(commands, parseCommand(r.rootNode.Children[i])) + commands = append(commands, parseCommand(r.rootNode.Children[i], false)) } return commands @@ -103,13 +103,14 @@ func handleJSONArgs(args []string, attributes map[string]bool) []string { return []string{strings.Join(args, " ")} } -func parseCommand(node *parser.Node) ConfigCommand { +func parseCommand(node *parser.Node, isOnbuild bool) ConfigCommand { cfg := ConfigCommand{ - name: node.Value, - attrs: node.Attributes, - original: node.Original, - args: []string{}, - flags: parseFlags(node.Flags), + name: node.Value, + attrs: node.Attributes, + original: node.Original, + args: []string{}, + flags: parseFlags(node.Flags), + isOnbuild: isOnbuild, } // fill in args and substitute vars @@ -120,8 +121,8 @@ func parseCommand(node *parser.Node) ConfigCommand { return cfg } -func parseOnbuildCommands(onBuildTriggers []string) ([]Command, error) { - commands := []Command{} +func parseOnbuildCommands(onBuildTriggers []string) ([]ConfigCommand, error) { + commands := []ConfigCommand{} for _, step := range onBuildTriggers { @@ -138,12 +139,7 @@ func parseOnbuildCommands(onBuildTriggers []string) ([]Command, error) { return commands, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) } - cmd, err := NewCommand(parseCommand(n)) - if err != nil { - return commands, err - } - - commands = append(commands, &CommandOnbuildWrap{cmd}) + commands = append(commands, parseCommand(n, true)) } } diff --git a/src/rocker/build2/rockerfile_test.go b/src/rocker/build2/rockerfile_test.go index be7ac077..700f8a42 100644 --- a/src/rocker/build2/rockerfile_test.go +++ b/src/rocker/build2/rockerfile_test.go @@ -21,7 +21,6 @@ import ( "strings" "testing" - "github.com/kr/pretty" "github.com/stretchr/testify/assert" ) @@ -70,6 +69,9 @@ func TestRockerfileParseOnbuildCommands(t *testing.T) { t.Fatal(err) } - // TODO: asserts - pretty.Println(commands) + assert.Len(t, commands, 2) + assert.Equal(t, "run", commands[0].name) + assert.Equal(t, []string{"make"}, commands[0].args) + assert.Equal(t, "run", commands[1].name) + assert.Equal(t, []string{"make install"}, commands[1].args) } From 990756207a29bb4afcb8dfeb8a0372c10a36745e Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 23 Sep 2015 14:16:02 +0300 Subject: [PATCH 045/131] COPY improve --- src/rocker/build2/copy.go | 17 ++++++++++----- src/rocker/build2/copy_test.go | 38 ++++++++++++++++++++++++++++++++-- 2 files changed, 48 insertions(+), 7 deletions(-) diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index b91d957a..f86fa22c 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -68,6 +68,11 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { excludes = []string{} ) + // If destination is not a directory (no leading slash) + if !strings.HasSuffix(dest, string(os.PathSeparator)) && len(src) > 1 { + return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end 
with a /", cmdName) + } + if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil { return s, err } @@ -142,13 +147,15 @@ func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) ( // If destination is not a directory (no leading slash) if !strings.HasSuffix(u.dest, sep) { - if len(u.files) > 1 { - return u, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } // If we transfer a single file and the destination is not a directory, // then rename it and remove prefix - u.files[0].dest = strings.TrimLeft(u.dest, sep) - u.dest = "" + if len(u.files) == 1 { + u.files[0].dest = strings.TrimLeft(u.dest, sep) + u.dest = "" + } else { + // add leading slash for more then one file + u.dest += sep + } } // Cut the slash prefix from the dest, because it will be the root of the tar diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build2/copy_test.go index 04653922..b0728486 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build2/copy_test.go @@ -164,8 +164,6 @@ func TestListFiles_Dir_AndFiles(t *testing.T) { } } -// TODO: COPY . /go/src/app - func TestListFiles_Dir_Multi(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", @@ -297,6 +295,42 @@ func TestMakeTarStream_OneFileToDir(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +func TestMakeTarStream_CurrentDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test.txt": "hello", + "b/1.txt": "hello", + "b/2.txt": "hello", + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{} + dest := "/go/app/src" + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "go/app/src/a/test.txt", + "go/app/src/b/1.txt", + "go/app/src/b/2.txt", + "go/app/src/c/foo.txt", + "go/app/src/c/x/1.txt", + "go/app/src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + // helper functions func makeTmpDir(t *testing.T, files map[string]string) string { From 1a6b2e9d71648dea823d85068aa84dea04d7ad80 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 09:10:11 +0300 Subject: [PATCH 046/131] TAG impl --- src/rocker/build2/build_test.go | 5 ++++ src/rocker/build2/client.go | 18 +++++++++++ src/rocker/build2/commands.go | 12 ++++++++ src/rocker/build2/commands_test.go | 48 ++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+) diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 444d3031..efd65579 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -69,6 +69,11 @@ func (m *MockClient) RemoveImage(imageID string) error { return args.Error(0) } +func (m *MockClient) TagImage(imageID, imageName string) error { + args := m.Called(imageID, imageName) + return args.Error(0) +} + func (m *MockClient) CreateContainer(state State) (string, error) { args := m.Called(state) return args.String(0), args.Error(1) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 0e9ae2f5..3bbd5e44 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -21,6 +21,7 @@ import ( "io" "os" "os/signal" + "rocker/imagename" "github.com/docker/docker/pkg/units" @@ -37,6 +38,7 @@ type 
Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error RemoveImage(imageID string) error + TagImage(imageID, imageName string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attach bool) error CommitContainer(state State, message string) (imageID string, err error) @@ -332,3 +334,19 @@ func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, p return c.client.UploadToContainer(containerID, opts) } + +func (c *DockerClient) TagImage(imageID, imageName string) error { + log.Infof("| Tag %.12s -> %s", imageID, imageName) + + img := imagename.NewFromString(imageName) + + opts := docker.TagImageOptions{ + Repo: img.NameWithRegistry(), + Tag: img.GetTag(), + Force: true, + } + + log.Debugf("Tag image %s with options: %# v", imageID, opts) + + return c.client.TagImage(imageID, opts) +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 96d78686..02ac82bd 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -601,6 +601,18 @@ func (c *CommandTag) String() string { } func (c *CommandTag) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("TAG requires exactly one argument") + } + + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot TAG on empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + return b.state, nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index e06a3519..9e7ab1c1 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -572,4 +572,52 @@ func TestCommandCopy_Simple(t *testing.T) { assert.Equal(t, "456", state.ContainerID) } +// =========== Testing TAG =========== + +func TestCommandTag_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + b.state.ImageID = "123" + + c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + + _, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) +} + +func TestCommandTag_WrongArgsNumber(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{}, + }} + cmd2 := &CommandTag{ConfigCommand{ + args: []string{"1", "2"}, + }} + + b.state.ImageID = "123" + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "TAG requires exactly one argument") + + _, err2 := cmd2.Execute(b) + assert.EqualError(t, err2, "TAG requires exactly one argument") +} + +func TestCommandTag_NoImage(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandTag{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "Cannot TAG on empty image") +} + // TODO: test Cleanup From 9432133dc307be980c21838ecf94303683d0ba79 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 09:10:21 +0300 Subject: [PATCH 047/131] wrap ONBUILD commands correctly --- src/rocker/build2/commands.go | 39 +++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 02ac82bd..b94faceb 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -52,39 +52,46 @@ type Command interface { String() string } -func NewCommand(cfg 
ConfigCommand) (Command, error) { +func NewCommand(cfg ConfigCommand) (cmd Command, err error) { // TODO: use reflection? switch cfg.name { case "from": - return &CommandFrom{cfg}, nil + cmd = &CommandFrom{cfg} case "maintainer": - return &CommandMaintainer{cfg}, nil + cmd = &CommandMaintainer{cfg} case "run": - return &CommandRun{cfg}, nil + cmd = &CommandRun{cfg} case "env": - return &CommandEnv{cfg}, nil + cmd = &CommandEnv{cfg} case "label": - return &CommandLabel{cfg}, nil + cmd = &CommandLabel{cfg} case "workdir": - return &CommandWorkdir{cfg}, nil + cmd = &CommandWorkdir{cfg} case "tag": - return &CommandTag{cfg}, nil + cmd = &CommandTag{cfg} case "copy": - return &CommandCopy{cfg}, nil + cmd = &CommandCopy{cfg} case "add": - return &CommandAdd{cfg}, nil + cmd = &CommandAdd{cfg} case "cmd": - return &CommandCmd{cfg}, nil + cmd = &CommandCmd{cfg} case "entrypoint": - return &CommandEntrypoint{cfg}, nil + cmd = &CommandEntrypoint{cfg} case "expose": - return &CommandExpose{cfg}, nil + cmd = &CommandExpose{cfg} case "volume": - return &CommandVolume{cfg}, nil + cmd = &CommandVolume{cfg} case "user": - return &CommandUser{cfg}, nil + cmd = &CommandUser{cfg} + default: + return nil, fmt.Errorf("Unknown command: %s", cfg.name) + } + + if cfg.isOnbuild { + cmd = &CommandOnbuildWrap{cmd} } - return nil, fmt.Errorf("Unknown command: %s", cfg.name) + + return cmd, nil } // CommandFrom implements FROM From 355aa6bfda0f652aada0e6a9417b69f425432d47 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 09:14:59 +0300 Subject: [PATCH 048/131] TAG use imagename.String for logging --- src/rocker/build2/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 3bbd5e44..757695eb 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -336,10 +336,10 @@ func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, p } func (c *DockerClient) TagImage(imageID, imageName string) error { - log.Infof("| Tag %.12s -> %s", imageID, imageName) - img := imagename.NewFromString(imageName) + log.Infof("| Tag %.12s -> %s", imageID, img) + opts := docker.TagImageOptions{ Repo: img.NameWithRegistry(), Tag: img.GetTag(), From 1d47904de070f6055ee4637039800408c18b7bf3 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 09:32:32 +0300 Subject: [PATCH 049/131] PUSH impl --- src/rocker/build2/build_test.go | 5 +++ src/rocker/build2/client.go | 39 ++++++++++++++++++++++++ src/rocker/build2/commands.go | 31 +++++++++++++++++++ src/rocker/build2/commands_test.go | 49 ++++++++++++++++++++++++++++++ 4 files changed, 124 insertions(+) diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index efd65579..e0957fa9 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -74,6 +74,11 @@ func (m *MockClient) TagImage(imageID, imageName string) error { return args.Error(0) } +func (m *MockClient) PushImage(imageName string) error { + args := m.Called(imageName) + return args.Error(0) +} + func (m *MockClient) CreateContainer(state State) (string, error) { args := m.Called(state) return args.String(0), args.Error(1) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 757695eb..149db12b 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -39,6 +39,7 @@ type Client interface { PullImage(name string) error RemoveImage(imageID string) error TagImage(imageID, imageName string) 
error + PushImage(imageName string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attach bool) error CommitContainer(state State, message string) (imageID string, err error) @@ -350,3 +351,41 @@ func (c *DockerClient) TagImage(imageID, imageName string) error { return c.client.TagImage(imageID, opts) } + +func (c *DockerClient) PushImage(imageName string) error { + var ( + img = imagename.NewFromString(imageName) + errch = make(chan error) + + pipeReader, pipeWriter = io.Pipe() + def = log.StandardLogger() + fdOut, isTerminalOut = term.GetFdInfo(def.Out) + out = def.Out + + opts = docker.PushImageOptions{ + Name: img.NameWithRegistry(), + Tag: img.GetTag(), + Registry: img.Registry, + OutputStream: pipeWriter, + RawJSONStream: true, + } + ) + + if !isTerminalOut { + out = def.Writer() + } + + log.Infof("| Push %s", img) + + log.Debugf("Push with options: %# v", opts) + + go func() { + errch <- jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut) + }() + + if err := c.client.PushImage(opts, c.auth); err != nil { + return err + } + + return <-errch +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index b94faceb..73833a1a 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -69,6 +69,8 @@ func NewCommand(cfg ConfigCommand) (cmd Command, err error) { cmd = &CommandWorkdir{cfg} case "tag": cmd = &CommandTag{cfg} + case "push": + cmd = &CommandPush{cfg} case "copy": cmd = &CommandCopy{cfg} case "add": @@ -623,6 +625,35 @@ func (c *CommandTag) Execute(b *Build) (State, error) { return b.state, nil } +// CommandPush implements PUSH +type CommandPush struct { + cfg ConfigCommand +} + +func (c *CommandPush) String() string { + return c.cfg.original +} + +func (c *CommandPush) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("PUSH requires exactly one argument") + } + + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot PUSH empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + + if err := b.client.PushImage(c.cfg.args[0]); err != nil { + return b.state, err + } + + return b.state, nil +} + // CommandCopy implements COPY type CommandCopy struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 9e7ab1c1..33aaf6ed 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -620,4 +620,53 @@ func TestCommandTag_NoImage(t *testing.T) { assert.EqualError(t, err, "Cannot TAG on empty image") } +// =========== Testing PUSH =========== + +func TestCommandPush_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + b.state.ImageID = "123" + + c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + c.On("PushImage", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + + _, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) +} + +func TestCommandPush_WrongArgsNumber(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{}, + }} + cmd2 := &CommandPush{ConfigCommand{ + args: []string{"1", "2"}, + }} + + b.state.ImageID = "123" + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "PUSH requires exactly one argument") + + _, err2 := cmd2.Execute(b) + 
assert.EqualError(t, err2, "PUSH requires exactly one argument") +} + +func TestCommandPush_NoImage(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandPush{ConfigCommand{ + args: []string{"docker.io/grammarly/rocker:1.0"}, + }} + + _, err := cmd.Execute(b) + assert.EqualError(t, err, "Cannot PUSH empty image") +} + // TODO: test Cleanup From 78a376636fe3733d33ab95a83ca67d611d9d9f68 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 09:35:24 +0300 Subject: [PATCH 050/131] refactor image pull --- src/rocker/build2/client.go | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 149db12b..48a0c11c 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -83,7 +83,7 @@ func (c *DockerClient) PullImage(name string) error { out = def.Writer() } - pullOpts := docker.PullImageOptions{ + opts := docker.PullImageOptions{ Repository: image.NameWithRegistry(), Registry: image.Registry, Tag: image.GetTag(), @@ -91,25 +91,18 @@ func (c *DockerClient) PullImage(name string) error { RawJSONStream: true, } - go func() { - err := c.client.PullImage(pullOpts, c.auth) - - if err := pipeWriter.Close(); err != nil { - log.Errorf("pipeWriter.Close() err: %s", err) - } + log.Infof("| Pull image %s", image) + log.Debugf("Pull image %s with options: %# v", image, opts) - errch <- err + go func() { + errch <- jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut) }() - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut); err != nil { - return fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) - } - - if err := <-errch; err != nil { - return fmt.Errorf("Failed to pull image: %s, error: %s", image, err) + if err := c.client.PullImage(opts, c.auth); err != nil { + return err } - return nil + return <-errch } func (c *DockerClient) RemoveImage(imageID string) error { From 9b74d2265921efa7c481af076651d318c3c8b5c5 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 10:44:39 +0300 Subject: [PATCH 051/131] ATTACH impl --- src/cmd/rocker/main.go | 1 + src/rocker/build2/build.go | 1 + src/rocker/build2/client.go | 68 ++++++++++++++++------------- src/rocker/build2/client_tty.go | 77 +++++++++++++++++++++++++++++++++ src/rocker/build2/commands.go | 63 +++++++++++++++++++++++++++ src/rocker/build2/util.go | 41 ++++++++++++++++++ 6 files changed, 222 insertions(+), 29 deletions(-) create mode 100644 src/rocker/build2/client_tty.go create mode 100644 src/rocker/build2/util.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 232448f5..13fbe3de 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -266,6 +266,7 @@ func buildCommand(c *cli.Context) { ContextDir: contextDir, Pull: c.Bool("pull"), NoGarbage: c.Bool("no-garbage"), + Attach: c.Bool("attach"), }) plan, err := build2.NewPlan(rockerfile.Commands(), true) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 97bf67b6..ddab901d 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -37,6 +37,7 @@ type Config struct { ContextDir string Pull bool NoGarbage bool + Attach bool } type State struct { diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 48a0c11c..e07318ab 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -147,10 +147,13 @@ func (c *DockerClient) CreateContainer(s State) (string, 
error) { func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error { var ( - success = make(chan struct{}) - def = log.StandardLogger() + success = make(chan struct{}) + finished = make(chan struct{}, 1) + sigch = make(chan os.Signal, 1) + errch = make(chan error) // Wrap output streams with logger + def = log.StandardLogger() outLogger = &log.Logger{ Out: def.Out, Formatter: NewContainerFormatter(containerID, log.InfoLevel), @@ -161,6 +164,9 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error Formatter: NewContainerFormatter(containerID, log.ErrorLevel), Level: def.Level, } + + in = os.Stdin + fdIn, isTerminalIn = term.GetFdInfo(in) ) attachOpts := docker.AttachToContainerOptions{ @@ -173,25 +179,31 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error Success: success, } - // TODO: will implement attach later - // if attachStdin { - // if !builder.isTerminalIn { - // return fmt.Errorf("Cannot attach to a container on non tty input") - // } - // oldState, err := term.SetRawTerminal(builder.fdIn) - // if err != nil { - // return err - // } - // defer term.RestoreTerminal(builder.fdIn, oldState) - - // attachOpts.InputStream = readerVoidCloser{builder.InStream} - // attachOpts.OutputStream = builder.OutStream - // attachOpts.ErrorStream = builder.OutStream - // attachOpts.Stdin = true - // attachOpts.RawTerminal = true - // } - - finished := make(chan struct{}, 1) + // Used by ATTACH + if attachStdin { + log.Infof("| Attach stdin to the container %.12s", containerID) + + if !isTerminalIn { + return fmt.Errorf("Cannot attach to a container on non tty input") + } + + attachOpts.InputStream = readerVoidCloser{in} + attachOpts.OutputStream = os.Stdout + attachOpts.ErrorStream = os.Stderr + attachOpts.Stdin = true + attachOpts.RawTerminal = true + } + + // We want do debug the final attach options before setting raw term + log.Debugf("Attach to container with options: %# v", attachOpts) + + if attachStdin { + oldState, err := term.SetRawTerminal(fdIn) + if err != nil { + return err + } + defer term.RestoreTerminal(fdIn, oldState) + } go func() { if err := c.client.AttachToContainer(attachOpts); err != nil { @@ -222,21 +234,19 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error return err } - // if attachStdin { - // if err := builder.monitorTtySize(containerID); err != nil { - // return fmt.Errorf("Failed to monitor TTY size for container %.12s, error: %s", containerID, err) - // } - // } + if attachStdin { + if err := c.monitorTtySize(containerID, os.Stdout); err != nil { + return fmt.Errorf("Failed to monitor TTY size for container %.12s, error: %s", containerID, err) + } + } // TODO: move signal handling to the builder? - sigch := make(chan os.Signal, 1) signal.Notify(sigch, os.Interrupt) - errch := make(chan error) - go func() { statusCode, err := c.client.WaitContainer(containerID) + // log.Debugf("Wait finished, status %q error %q", statusCode, err) if err != nil { errch <- err } else if statusCode != 0 { diff --git a/src/rocker/build2/client_tty.go b/src/rocker/build2/client_tty.go new file mode 100644 index 00000000..124065e2 --- /dev/null +++ b/src/rocker/build2/client_tty.go @@ -0,0 +1,77 @@ +// This code is borrowed from Docker +// Licensed under the Apache License, Version 2.0; Copyright 2013-2015 Docker, Inc. 
See LICENSE.APACHE +// NOTICE: no changes has been made to these functions code + +package build2 + +import ( + "io" + "os" + gosignal "os/signal" + "runtime" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" +) + +func (c *DockerClient) monitorTtySize(id string, out io.Writer) error { + c.resizeTty(id, out) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := c.getTtySize(out) + for { + time.Sleep(time.Millisecond * 250) + h, w := c.getTtySize(out) + + if prevW != w || prevH != h { + c.resizeTty(id, out) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + c.resizeTty(id, out) + } + }() + } + return nil +} + +func (c *DockerClient) resizeTty(id string, out io.Writer) { + height, width := c.getTtySize(out) + if height == 0 && width == 0 { + return + } + + if err := c.client.ResizeContainerTTY(id, height, width); err != nil { + log.Errorf("Failed to resize container TTY %.12s, error: %s\n", id, err) + } +} + +func (c *DockerClient) getTtySize(out io.Writer) (int, int) { + var ( + fdOut, isTerminalOut = term.GetFdInfo(out) + ) + + if !isTerminalOut { + return 0, 0 + } + + ws, err := term.GetWinsize(fdOut) + if err != nil { + log.Errorf("Error getting TTY size: %s\n", err) + if ws == nil { + return 0, 0 + } + } + + return int(ws.Height), int(ws.Width) +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 73833a1a..f5462d25 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -61,6 +61,8 @@ func NewCommand(cfg ConfigCommand) (cmd Command, err error) { cmd = &CommandMaintainer{cfg} case "run": cmd = &CommandRun{cfg} + case "attach": + cmd = &CommandAttach{cfg} case "env": cmd = &CommandEnv{cfg} case "label": @@ -304,6 +306,67 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { return s, nil } +// CommandAttach implements ATTACH +type CommandAttach struct { + cfg ConfigCommand +} + +func (c *CommandAttach) String() string { + return c.cfg.original +} + +func (c *CommandAttach) Execute(b *Build) (s State, err error) { + s = b.state + + // simply ignore this command if we don't wanna attach + if !b.cfg.Attach { + log.Infof("Skip ATTACH; use --attach option to get inside") + s.SkipCommit() + return s, nil + } + + if s.ImageID == "" { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to ATTACH") + } + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if len(cmd) == 0 { + cmd = []string{"/bin/sh"} + } else if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
+ } + + // TODO: test with ENTRYPOINT + + // We run this command in the container using CMD + + // Backup the config so we can restore it later + origConfig := s.Config + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + s.Config.Tty = true + s.Config.OpenStdin = true + s.Config.StdinOnce = true + s.Config.AttachStdin = true + s.Config.AttachStderr = true + s.Config.AttachStdout = true + + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + if err = b.client.RunContainer(s.ContainerID, true); err != nil { + return s, err + } + + // Restore the config + s.Config = origConfig + + return s, nil +} + // CommandEnv implements ENV type CommandEnv struct { cfg ConfigCommand diff --git a/src/rocker/build2/util.go b/src/rocker/build2/util.go new file mode 100644 index 00000000..4fd6b568 --- /dev/null +++ b/src/rocker/build2/util.go @@ -0,0 +1,41 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import "io" + +// readerVoidCloser is a hack of the improved go-dockerclient's hijacking behavior +// It simply wraps io.Reader (os.Stdin in our case) and discards any Close() call. +// +// It's important because we don't want to close os.Stdin for two reasons: +// 1. We need to restore the terminal back from the raw mode after ATTACH +// 2. 
There can be other ATTACH instructions for which we need an open stdin +// +// See additional notes in the runContainerAttachStdin() function +type readerVoidCloser struct { + reader io.Reader +} + +// Read reads from current reader +func (r readerVoidCloser) Read(p []byte) (int, error) { + return r.reader.Read(p) +} + +// Close is a viod function, does nothing +func (r readerVoidCloser) Close() error { + return nil +} From cb1b83ad85f9600dde5c01da7dd27de4acfe37ca Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 11:47:06 +0300 Subject: [PATCH 052/131] process ONBUILD dockerfile instructions --- src/rocker/build2/commands.go | 36 ++++++++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 17 ++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index f5462d25..646b87b3 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -19,6 +19,7 @@ package build2 import ( "fmt" "path/filepath" + "regexp" "sort" "strings" @@ -87,6 +88,8 @@ func NewCommand(cfg ConfigCommand) (cmd Command, err error) { cmd = &CommandVolume{cfg} case "user": cmd = &CommandUser{cfg} + case "onbuild": + cmd = &CommandOnbuild{cfg} default: return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -663,6 +666,39 @@ func (c *CommandUser) Execute(b *Build) (s State, err error) { return s, nil } +// CommandOnbuild implements ONBUILD +type CommandOnbuild struct { + cfg ConfigCommand +} + +func (c *CommandOnbuild) String() string { + return c.cfg.original +} + +func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("ONBUILD requires at least one argument") + } + + command := strings.ToUpper(strings.TrimSpace(c.cfg.args[0])) + switch command { + case "ONBUILD": + return s, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return s, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", command) + } + + orig := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(c.cfg.original, "") + + s.Config.OnBuild = append(s.Config.OnBuild, orig) + s.Commit(fmt.Sprintf("ONBUILD %s", orig)) + + return s, nil +} + // CommandTag implements TAG type CommandTag struct { cfg ConfigCommand diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 33aaf6ed..589b2d09 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -544,6 +544,23 @@ func TestCommandUser_Simple(t *testing.T) { assert.Equal(t, "www", state.Config.User) } +// =========== Testing ONBUILD =========== + +func TestCommandOnBuild_Simple(t *testing.T) { + b, _ := makeBuild(t, "", Config{}) + cmd := &CommandOnbuild{ConfigCommand{ + args: []string{"RUN", "make", "install"}, + original: "ONBUILD RUN make install", + }} + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, []string{"RUN make install"}, state.Config.OnBuild) +} + // =========== Testing COPY =========== func TestCommandCopy_Simple(t *testing.T) { From 714dbabc2e38b175a93a979fe24c77180f1d74f4 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 12:10:03 +0300 Subject: [PATCH 053/131] MOUNT src:dest impl --- src/rocker/build2/build.go | 1 + src/rocker/build2/build_test.go | 5 +++ src/rocker/build2/client.go | 14 +++---- src/rocker/build2/commands.go | 65 ++++++++++++++++++++++++++++++ src/rocker/build2/commands_test.go | 20 +++++++++ 5 files changed, 98 
insertions(+), 7 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index ddab901d..6f87d153 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -42,6 +42,7 @@ type Config struct { type State struct { Config docker.Config + HostConfig docker.HostConfig ImageID string ContainerID string CommitMsg []string diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index e0957fa9..7d96880c 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -103,3 +103,8 @@ func (m *MockClient) UploadToContainer(containerID string, stream io.Reader, pat args := m.Called(containerID, stream, path) return args.Error(0) } + +func (m *MockClient) ResolveHostPath(path string) (resultPath string, err error) { + args := m.Called(path) + return args.String(0), args.Error(1) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index e07318ab..278860eb 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -22,6 +22,7 @@ import ( "os" "os/signal" + "rocker/dockerclient" "rocker/imagename" "github.com/docker/docker/pkg/units" @@ -45,6 +46,7 @@ type Client interface { CommitContainer(state State, message string) (imageID string, err error) RemoveContainer(containerID string) error UploadToContainer(containerID string, stream io.Reader, path string) error + ResolveHostPath(path string) (resultPath string, err error) } type DockerClient struct { @@ -116,9 +118,6 @@ func (c *DockerClient) RemoveImage(imageID string) error { } func (c *DockerClient) CreateContainer(s State) (string, error) { - // TODO: mount volumes - // volumesFrom := builder.getMountContainerIds() - // binds := builder.getBinds() s.Config.Image = s.ImageID @@ -126,10 +125,7 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { opts := docker.CreateContainerOptions{ Config: &s.Config, - HostConfig: &docker.HostConfig{ - // Binds: binds, - // VolumesFrom: volumesFrom, - }, + HostConfig: &s.HostConfig, } log.Debugf("Create container: %# v", pretty.Formatter(opts)) @@ -392,3 +388,7 @@ func (c *DockerClient) PushImage(imageName string) error { return <-errch } + +func (c *DockerClient) ResolveHostPath(path string) (resultPath string, err error) { + return dockerclient.ResolveHostPath(path, c.client) +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 646b87b3..493c3c93 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -18,6 +18,8 @@ package build2 import ( "fmt" + "os" + "path" "path/filepath" "regexp" "sort" @@ -90,6 +92,8 @@ func NewCommand(cfg ConfigCommand) (cmd Command, err error) { cmd = &CommandUser{cfg} case "onbuild": cmd = &CommandOnbuild{cfg} + case "mount": + cmd = &CommandMount{cfg} default: return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -786,6 +790,67 @@ func (c *CommandAdd) Execute(b *Build) (State, error) { return copyFiles(b, c.cfg.args, "ADD") } +// CommandMount implements MOUNT +type CommandMount struct { + cfg ConfigCommand +} + +func (c *CommandMount) String() string { + return c.cfg.original +} + +func (c *CommandMount) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return b.state, fmt.Errorf("MOUNT requires at least one argument") + } + + commitIds := []string{} + + for _, arg := range c.cfg.args { + + switch strings.Contains(arg, ":") { + // MOUNT src:dest + case true: + var ( + pair = strings.SplitN(arg, ":", 2) + src = pair[0] + dest = pair[1] + err 
error + ) + + // Process relative paths in volumes + if strings.HasPrefix(src, "~") { + src = strings.Replace(src, "~", os.Getenv("HOME"), 1) + } + if !path.IsAbs(src) { + src = path.Join(b.cfg.ContextDir, src) + } + + if src, err = b.client.ResolveHostPath(src); err != nil { + return s, err + } + + if s.HostConfig.Binds == nil { + s.HostConfig.Binds = []string{} + } + + s.HostConfig.Binds = append(s.HostConfig.Binds, src+":"+dest) + commitIds = append(commitIds, arg) + + // MOUNT dir + case false: + // mount = builderMount{cache: useCache, dest: arg} + } + } + + s.Commit(fmt.Sprintf("MOUNT %q", commitIds)) + + return s, nil +} + // CommandOnbuildWrap wraps ONBUILD command type CommandOnbuildWrap struct { cmd Command diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 589b2d09..1c1a8701 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -686,4 +686,24 @@ func TestCommandPush_NoImage(t *testing.T) { assert.EqualError(t, err, "Cannot PUSH empty image") } +// =========== Testing MOUNT =========== + +func TestCommandMount_Simple(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandMount{ConfigCommand{ + args: []string{"/src:/dest"}, + }} + + c.On("ResolveHostPath", "/src").Return("/resolved/src", nil).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + c.AssertExpectations(t) + assert.Equal(t, []string{"/resolved/src:/dest"}, state.HostConfig.Binds) + assert.Equal(t, []string{`MOUNT ["/src:/dest"]`}, state.CommitMsg) +} + // TODO: test Cleanup From 234679c3824e419fbc4d7d8b7f3658e9159553a2 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 19:25:20 +0300 Subject: [PATCH 054/131] MOUNT volume container --- src/cmd/rocker/main.go | 1 + src/rocker/build2/build.go | 22 +++++++++++++ src/rocker/build2/build_test.go | 10 ++++++ src/rocker/build2/client.go | 50 ++++++++++++++++++++++++++++++ src/rocker/build2/commands.go | 12 ++++++- src/rocker/build2/commands_test.go | 28 +++++++++++++++++ src/rocker/build2/util.go | 21 ++++++++++++- 7 files changed, 142 insertions(+), 2 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 13fbe3de..2d55f992 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -267,6 +267,7 @@ func buildCommand(c *cli.Context) { Pull: c.Bool("pull"), NoGarbage: c.Bool("no-garbage"), Attach: c.Bool("attach"), + ID: c.String("id"), }) plan, err := build2.NewPlan(rockerfile.Commands(), true) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 6f87d153..fa001b65 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -29,12 +29,14 @@ import ( var ( NoBaseImageSpecifier = "scratch" + MountVolumeImage = "grammarly/scratch:latest" ) type Config struct { OutStream io.Writer InStream io.ReadCloser ContextDir string + ID string Pull bool NoGarbage bool Attach bool @@ -112,6 +114,26 @@ func (b *Build) GetImageID() string { return b.state.ImageID } +func (b *Build) createVolumeContainer(path string) (name string, err error) { + + name = b.mountsContainerName(path) + + config := &docker.Config{ + Image: MountVolumeImage, + Volumes: map[string]struct{}{ + path: struct{}{}, + }, + } + + if _, err = b.client.EnsureContainer(name, config, path); err != nil { + return name, err + } + + log.Infof("| Using container %s for %s", name, path) + + return name, nil +} + func (s *State) Commit(msg string) { s.CommitMsg = append(s.CommitMsg, msg) } diff --git 
a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index 7d96880c..c2226f19 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -108,3 +108,13 @@ func (m *MockClient) ResolveHostPath(path string) (resultPath string, err error) args := m.Called(path) return args.String(0), args.Error(1) } + +func (m *MockClient) EnsureImage(imageName string) error { + args := m.Called(imageName) + return args.Error(0) +} + +func (m *MockClient) EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) { + args := m.Called(containerName, config, purpose) + return args.String(0), args.Error(1) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 278860eb..c01d742b 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -41,11 +41,13 @@ type Client interface { RemoveImage(imageID string) error TagImage(imageID, imageName string) error PushImage(imageName string) error + EnsureImage(imageName string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attach bool) error CommitContainer(state State, message string) (imageID string, err error) RemoveContainer(containerID string) error UploadToContainer(containerID string, stream io.Reader, path string) error + EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) ResolveHostPath(path string) (resultPath string, err error) } @@ -392,3 +394,51 @@ func (c *DockerClient) PushImage(imageName string) error { func (c *DockerClient) ResolveHostPath(path string) (resultPath string, err error) { return dockerclient.ResolveHostPath(path, c.client) } + +func (c *DockerClient) EnsureImage(imageName string) (err error) { + + var img *docker.Image + if img, err = c.client.InspectImage(imageName); err != nil && err != docker.ErrNoSuchImage { + return err + } + if img != nil { + return nil + } + + return c.PullImage(imageName) +} + +func (c *DockerClient) EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) { + + // Check if container exists + container, err := c.client.InspectContainer(containerName) + + if _, ok := err.(*docker.NoSuchContainer); !ok && err != nil { + return "", err + } + if container != nil { + return container.ID, nil + } + + // No data volume container for this build, create it + + if err := c.EnsureImage(config.Image); err != nil { + return "", fmt.Errorf("Failed to check image %s, error: %s", config.Image, err) + } + + log.Infof("| Create container: %s for %s", containerName, purpose) + + opts := docker.CreateContainerOptions{ + Name: containerName, + Config: config, + } + + log.Debugf("Create container options %# v", opts) + + container, err = c.client.CreateContainer(opts) + if err != nil { + return "", fmt.Errorf("Failed to create container %s from image %s, error: %s", containerName, config.Image, err) + } + + return container.ID, err +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 493c3c93..1fe48c23 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -842,7 +842,17 @@ func (c *CommandMount) Execute(b *Build) (s State, err error) { // MOUNT dir case false: - // mount = builderMount{cache: useCache, dest: arg} + name, err := b.createVolumeContainer(arg) + if err != nil { + return s, err + } + + if s.HostConfig.VolumesFrom == nil { + s.HostConfig.VolumesFrom = []string{} + } + + 
s.HostConfig.VolumesFrom = append(s.HostConfig.VolumesFrom, name) + commitIds = append(commitIds, name+":"+arg) } } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 1c1a8701..61f22e63 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -17,6 +17,7 @@ package build2 import ( + "fmt" "reflect" "testing" @@ -706,4 +707,31 @@ func TestCommandMount_Simple(t *testing.T) { assert.Equal(t, []string{`MOUNT ["/src:/dest"]`}, state.CommitMsg) } +func TestCommandMount_VolumeContainer(t *testing.T) { + b, c := makeBuild(t, "", Config{}) + cmd := &CommandMount{ConfigCommand{ + args: []string{"/cache"}, + }} + + containerName := b.mountsContainerName("/cache") + + c.On("EnsureContainer", containerName, mock.AnythingOfType("*docker.Config"), "/cache").Return("123", nil).Run(func(args mock.Arguments) { + arg := args.Get(1).(*docker.Config) + // TODO: a better check + // assert.True(t, len(arg.Config.Cmd) > 0) + assert.Equal(t, MountVolumeImage, arg.Image) + }).Once() + + state, err := cmd.Execute(b) + if err != nil { + t.Fatal(err) + } + + commitMsg := fmt.Sprintf("MOUNT [\"%s:/cache\"]", containerName) + + c.AssertExpectations(t) + assert.Equal(t, []string{containerName}, state.HostConfig.VolumesFrom) + assert.Equal(t, []string{commitMsg}, state.CommitMsg) +} + // TODO: test Cleanup diff --git a/src/rocker/build2/util.go b/src/rocker/build2/util.go index 4fd6b568..68700fb5 100644 --- a/src/rocker/build2/util.go +++ b/src/rocker/build2/util.go @@ -16,7 +16,26 @@ package build2 -import "io" +import ( + "crypto/md5" + "fmt" + "io" +) + +// mountsContainerName returns the name of volume container that will be used for a particular MOUNT +func (b *Build) mountsContainerName(path string) string { + // TODO: mounts are reused between different FROMs, is it ok? + mountID := b.getIdentifier() + ":" + path + return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID))) +} + +// getIdentifier returns the sequence that is unique to the current Rockerfile +func (b *Build) getIdentifier() string { + if b.cfg.ID != "" { + return b.cfg.ID + } + return b.cfg.ContextDir + ":" + b.rockerfile.Name +} // readerVoidCloser is a hack of the improved go-dockerclient's hijacking behavior // It simply wraps io.Reader (os.Stdin in our case) and discards any Close() call. 
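A quick standalone sketch of the MOUNT container naming introduced in util.go above: the data-volume container name is an md5 digest of the build identifier plus the mount path, so the same Rockerfile and path map to the same "rocker_mount_*" container across builds. The main wrapper and the example identifier/path below are illustrative only, not part of the patch.

package main

import (
	"crypto/md5"
	"fmt"
)

// mountContainerName mirrors Build.mountsContainerName from the diff above:
// a deterministic name derived from the build identifier and the MOUNT path.
func mountContainerName(buildID, path string) string {
	mountID := buildID + ":" + path
	return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID)))
}

func main() {
	// "my-build-id" stands for cfg.ID (or ContextDir + ":" + Rockerfile.Name when no
	// explicit id is given); "/cache" is the directory passed to MOUNT.
	fmt.Println(mountContainerName("my-build-id", "/cache"))
	// Same inputs, same name: the volume container and its cached contents are reused.
}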
From 58c50d5ac90275b1b60914c19bf5dfb08d0955dd Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 20:25:25 +0300 Subject: [PATCH 055/131] improve MOUNT volume tests --- src/rocker/build2/commands_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 61f22e63..4d676e43 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -717,9 +717,11 @@ func TestCommandMount_VolumeContainer(t *testing.T) { c.On("EnsureContainer", containerName, mock.AnythingOfType("*docker.Config"), "/cache").Return("123", nil).Run(func(args mock.Arguments) { arg := args.Get(1).(*docker.Config) - // TODO: a better check - // assert.True(t, len(arg.Config.Cmd) > 0) assert.Equal(t, MountVolumeImage, arg.Image) + expectedVolumes := map[string]struct{}{ + "/cache": struct{}{}, + } + assert.True(t, reflect.DeepEqual(expectedVolumes, arg.Volumes)) }).Once() state, err := cmd.Execute(b) From b8892c92c506da97d7cf1151fbf8b36c05c1c9d9 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 20:25:53 +0300 Subject: [PATCH 056/131] refactoring --- src/rocker/build2/build.go | 2 +- src/rocker/build2/commands.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index fa001b65..b25bfb87 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -114,7 +114,7 @@ func (b *Build) GetImageID() string { return b.state.ImageID } -func (b *Build) createVolumeContainer(path string) (name string, err error) { +func (b *Build) getVolumeContainer(path string) (name string, err error) { name = b.mountsContainerName(path) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 1fe48c23..cbf4624c 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -842,7 +842,7 @@ func (c *CommandMount) Execute(b *Build) (s State, err error) { // MOUNT dir case false: - name, err := b.createVolumeContainer(arg) + name, err := b.getVolumeContainer(arg) if err != nil { return s, err } From 137803d567f1e6399be59969ad3516bc8e00fd07 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 20:26:08 +0300 Subject: [PATCH 057/131] more debugging info --- src/rocker/build2/build.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index b25bfb87..7281e80a 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -125,6 +125,8 @@ func (b *Build) getVolumeContainer(path string) (name string, err error) { }, } + log.Debugf("Make MOUNT volume container %s with options %# v", name, config) + if _, err = b.client.EnsureContainer(name, config, path); err != nil { return name, err } From 4ea294dbe8a1d10f6f474e63c93d406733e71383 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 20:26:27 +0300 Subject: [PATCH 058/131] minor interface improvement --- src/rocker/build2/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index c01d742b..15f29681 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -43,7 +43,7 @@ type Client interface { PushImage(imageName string) error EnsureImage(imageName string) error CreateContainer(state State) (id string, err error) - RunContainer(containerID string, attach bool) error + RunContainer(containerID string, attachStdin bool) error 
CommitContainer(state State, message string) (imageID string, err error) RemoveContainer(containerID string) error UploadToContainer(containerID string, stream io.Reader, path string) error From d9de0a89f808933e5b931770932c5dd81fb1c451 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 21:49:11 +0300 Subject: [PATCH 059/131] EXPORT and IMPORT impl --- src/cmd/rocker/main.go | 1 + src/rocker/build2/build.go | 27 ++++++ src/rocker/build2/commands.go | 177 +++++++++++++++++++++++++++++++++- src/rocker/build2/plan.go | 4 +- src/rocker/build2/util.go | 6 ++ 5 files changed, 211 insertions(+), 4 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 2d55f992..5f404d2c 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -267,6 +267,7 @@ func buildCommand(c *cli.Context) { Pull: c.Bool("pull"), NoGarbage: c.Bool("no-garbage"), Attach: c.Bool("attach"), + Verbose: c.GlobalBool("verbose"), ID: c.String("id"), }) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 7281e80a..f1ff04e4 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -30,6 +30,8 @@ import ( var ( NoBaseImageSpecifier = "scratch" MountVolumeImage = "grammarly/scratch:latest" + RsyncImage = "grammarly/rsync-static:1" + ExportsPath = "/.rocker_exports" ) type Config struct { @@ -40,6 +42,7 @@ type Config struct { Pull bool NoGarbage bool Attach bool + Verbose bool } type State struct { @@ -47,6 +50,7 @@ type State struct { HostConfig docker.HostConfig ImageID string ContainerID string + ExportsID string CommitMsg []string ProducedImage bool CmdSet bool @@ -136,6 +140,29 @@ func (b *Build) getVolumeContainer(path string) (name string, err error) { return name, nil } +func (b *Build) getExportsContainer() (name string, err error) { + name = b.exportsContainerName() + + config := &docker.Config{ + Image: RsyncImage, + Volumes: map[string]struct{}{ + "/opt/rsync/bin": struct{}{}, + ExportsPath: struct{}{}, + }, + } + + log.Debugf("Make EXPORT container %s with options %# v", name, config) + + containerID, err := b.client.EnsureContainer(name, config, "exports") + if err != nil { + return "", err + } + + log.Infof("| Using exports container %s", name) + + return containerID, nil +} + func (s *State) Commit(msg string) { s.CommitMsg = append(s.CommitMsg, msg) } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index cbf4624c..03db2595 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -22,6 +22,7 @@ import ( "path" "path/filepath" "regexp" + "rocker/util" "sort" "strings" @@ -94,6 +95,10 @@ func NewCommand(cfg ConfigCommand) (cmd Command, err error) { cmd = &CommandOnbuild{cfg} case "mount": cmd = &CommandMount{cfg} + case "export": + cmd = &CommandExport{cfg} + case "import": + cmd = &CommandImport{cfg} default: return nil, fmt.Errorf("Unknown command: %s", cfg.name) } @@ -209,9 +214,16 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { } } + // Cleanup state + dirtyState := s + s = State{} + + // Keep some stuff between froms + s.ExportsID = dirtyState.ExportsID + // For final cleanup we want to keep imageID - if !c.final { - s.ImageID = "" + if c.final { + s.ImageID = dirtyState.ImageID } return s, nil @@ -861,6 +873,167 @@ func (c *CommandMount) Execute(b *Build) (s State, err error) { return s, nil } +// CommandExport implements EXPORT +type CommandExport struct { + cfg ConfigCommand +} + +func (c *CommandExport) String() string { + return c.cfg.original +} + +func 
(c *CommandExport) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("EXPORT requires at least one argument") + } + + // If only one argument was given to EXPORT, use basename of a file + // EXPORT /my/dir/file.tar --> /EXPORT_VOLUME/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} + } + + dest := args[len(args)-1] // last one is always the dest + + // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir + + exportsContainerName, err := b.getExportsContainer() + if err != nil { + return s, err + } + + // Remember original stuff so we can restore it when we finished + var exportsID string + origState := s + + defer func() { + s = origState + s.ExportsID = exportsID + }() + + // Append exports container as a volume + s.HostConfig.VolumesFrom = []string{exportsContainerName} + + // build the command + cmdDestPath, err := util.ResolvePath(ExportsPath, dest) + if err != nil { + return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) + } + + cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") + } + + cmd = append(cmd, args[0:len(args)-1]...) + cmd = append(cmd, cmdDestPath) + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + + // For caching + // builder.addLabels(map[string]string{ + // "rocker-exportsContainerId": exportsContainerID, + // }) + + if exportsID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + log.Infof("| Running in %.12s: %s", exportsID, strings.Join(cmd, " ")) + + if err = b.client.RunContainer(exportsID, false); err != nil { + return s, err + } + defer b.client.RemoveContainer(exportsID) + + return s, nil +} + +// CommandImport implements IMPORT +type CommandImport struct { + cfg ConfigCommand +} + +func (c *CommandImport) String() string { + return c.cfg.original +} + +func (c *CommandImport) Execute(b *Build) (s State, err error) { + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("IMPORT requires at least one argument") + } + if s.ExportsID == "" { + return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") + } + + // If only one argument was given to IMPORT, use the same path for destination + // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} + } + dest := args[len(args)-1] // last one is always the dest + + // Remember original stuff so we can restore it when we finished + origState := s + + var importID string + + defer func() { + s = origState + s.ContainerID = importID + s.Commit(fmt.Sprintf("IMPORT %s %q", s.ExportsID, args)) + }() + + cmd := []string{"/opt/rsync/bin/rsync", "-a"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") + } + + for _, arg := range args[0 : len(args)-1] { + argResolved, err := util.ResolvePath(ExportsPath, arg) + if err != nil { + return s, fmt.Errorf("Invalid IMPORT source: %s", arg) + } + cmd = append(cmd, argResolved) + } + cmd = append(cmd, dest) + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + s.HostConfig.VolumesFrom = []string{b.exportsContainerName()} + + if importID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + log.Infof("| Running in %.12s: 
%s", importID, strings.Join(cmd, " ")) + + if err = b.client.RunContainer(importID, false); err != nil { + return s, err + } + + // For caching + // builder.addLabels(map[string]string{ + // "rocker-lastExportImageId": builder.lastExportImageID, + // }) + + return s, nil +} + // CommandOnbuildWrap wraps ONBUILD command type CommandOnbuildWrap struct { cmd Command diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index d9dd9524..b06101fb 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -38,8 +38,8 @@ func NewPlan(commands []ConfigCommand, finalCleanup bool) (plan Plan, err error) } alwaysCommitBefore := "run attach add copy tag push" - alwaysCommitAfter := "run attach add copy" - neverCommitAfter := "from maintainer tag push" + alwaysCommitAfter := "run attach add copy import" + neverCommitAfter := "from maintainer tag push export" for i := 0; i < len(commands); i++ { cfg := commands[i] diff --git a/src/rocker/build2/util.go b/src/rocker/build2/util.go index 68700fb5..7bb052dd 100644 --- a/src/rocker/build2/util.go +++ b/src/rocker/build2/util.go @@ -29,6 +29,12 @@ func (b *Build) mountsContainerName(path string) string { return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID))) } +// exportsContainerName return the name of volume container that will be used for EXPORTs +func (b *Build) exportsContainerName() string { + mountID := b.getIdentifier() + return fmt.Sprintf("rocker_exports_%.6x", md5.Sum([]byte(mountID))) +} + // getIdentifier returns the sequence that is unique to the current Rockerfile func (b *Build) getIdentifier() string { if b.cfg.ID != "" { From 89dc762aedba71703bff4b4cf266ca681e071a82 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 21:49:50 +0300 Subject: [PATCH 060/131] remove todo --- src/rocker/build2/commands.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 03db2595..1952af09 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -356,8 +356,6 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
} - // TODO: test with ENTRYPOINT - // We run this command in the container using CMD // Backup the config so we can restore it later From 39968c85d8d0afd94cbf4b7c60dd80aef054ed84 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 24 Sep 2015 22:38:08 +0300 Subject: [PATCH 061/131] fix and improve stuff --- src/rocker/build2/client.go | 8 +------- src/rocker/build2/commands.go | 33 ++++++++++++++++++++------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 15f29681..226c1790 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -248,13 +248,7 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error if err != nil { errch <- err } else if statusCode != 0 { - // Remove errored container - // TODO: make option to keep them - if err := c.RemoveContainer(containerID); err != nil { - log.Error(err) - } - - errch <- fmt.Errorf("Failed to run container, exit with code %d", statusCode) + errch <- fmt.Errorf("Container %.12s exited with code %d", containerID, statusCode) } errch <- nil return diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 1952af09..bf2a65a6 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -268,18 +268,19 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { s.Config.Cmd = origCmd } + defer func(id string) { + s.ContainerID = "" + if err = b.client.RemoveContainer(id); err != nil { + log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) + } + }(s.ContainerID) + if s.ImageID, err = b.client.CommitContainer(s, message); err != nil { return s, err } s.ProducedImage = true - if err = b.client.RemoveContainer(s.ContainerID); err != nil { - return s, err - } - - s.ContainerID = "" - return s, nil } @@ -316,6 +317,7 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { } if err = b.client.RunContainer(s.ContainerID, false); err != nil { + b.client.RemoveContainer(s.ContainerID) return s, err } @@ -359,7 +361,10 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { // We run this command in the container using CMD // Backup the config so we can restore it later - origConfig := s.Config + origState := s + defer func() { + s = origState + }() s.Config.Cmd = cmd s.Config.Entrypoint = []string{} @@ -375,12 +380,10 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { } if err = b.client.RunContainer(s.ContainerID, true); err != nil { + b.client.RemoveContainer(s.ContainerID) return s, err } - // Restore the config - s.Config = origConfig - return s, nil } @@ -946,13 +949,13 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { if exportsID, err = b.client.CreateContainer(s); err != nil { return s, err } + defer b.client.RemoveContainer(exportsID) log.Infof("| Running in %.12s: %s", exportsID, strings.Join(cmd, " ")) if err = b.client.RunContainer(exportsID, false); err != nil { return s, err } - defer b.client.RemoveContainer(exportsID) return s, nil } @@ -991,8 +994,11 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { defer func() { s = origState - s.ContainerID = importID - s.Commit(fmt.Sprintf("IMPORT %s %q", s.ExportsID, args)) + + if err == nil { + s.ContainerID = importID + s.Commit(fmt.Sprintf("IMPORT %s %q", s.ExportsID, args)) + } }() cmd := []string{"/opt/rsync/bin/rsync", "-a"} @@ -1021,6 +1027,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { 
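To summarize the EXPORT/IMPORT mechanics introduced in this patch: both commands run rsync inside a temporary container that shares a dedicated exports volume, named deterministically from the build identifier (util.go above), so all FROM sections of one Rockerfile see the same volume. EXPORT copies data from the current build into /.rocker_exports and never commits a layer, while IMPORT copies it back out in a later FROM and is always committed (see the plan.go change). The standalone sketch below approximates the command line that the EXPORT step assembles; util.ResolvePath is not part of this patch, so filepath.Join stands in for it, and the sample arguments are invented.

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    const exportsPath = "/.rocker_exports"

    // exportCmd approximates the rsync invocation assembled for EXPORT: the last
    // argument is the destination inside the exports volume, everything before it
    // is copied as-is.
    func exportCmd(args []string, verbose bool) []string {
        cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"}
        if verbose {
            cmd = append(cmd, "--verbose")
        }
        cmd = append(cmd, args[:len(args)-1]...)
        return append(cmd, filepath.Join(exportsPath, args[len(args)-1]))
    }

    func main() {
        // EXPORT /app/build.tar /
        fmt.Println(strings.Join(exportCmd([]string{"/app/build.tar", "/"}, false), " "))
    }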
log.Infof("| Running in %.12s: %s", importID, strings.Join(cmd, " ")) if err = b.client.RunContainer(importID, false); err != nil { + b.client.RemoveContainer(importID) return s, err } From 88d1c1cba37ed4f564d5051aad1ecc4ba6ff6f7d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Fri, 25 Sep 2015 10:10:52 +0300 Subject: [PATCH 062/131] vendor docker/pkg/fileutils --- vendor/manifest | 7 + .../docker/docker/pkg/fileutils/fileutils.go | 184 ++++++++ .../docker/pkg/fileutils/fileutils_test.go | 402 ++++++++++++++++++ .../docker/pkg/fileutils/fileutils_unix.go | 22 + .../docker/pkg/fileutils/fileutils_windows.go | 7 + 5 files changed, 622 insertions(+) create mode 100644 vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go diff --git a/vendor/manifest b/vendor/manifest index 680dfcee..62cf6977 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -136,6 +136,13 @@ "revision": "b0dc11127ef4fc20261ccc0db03a16b17f7f91c4", "branch": "master", "path": "/pkg/parsers" + }, + { + "importpath": "github.com/docker/docker/pkg/fileutils", + "repository": "https://github.com/docker/docker", + "revision": "02ae137b1d309729c32110aac6e315e798ba4f0e", + "branch": "master", + "path": "/pkg/fileutils" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 00000000..08b9840c --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,184 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" +) + +// exclusion return true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' +} + +// empty return true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
+func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and remove +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. 
+func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 00000000..b544ffbf --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,402 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote 
%d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// An empty string should return true from Empty. 
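A note on why this package is vendored: the following patch switches rocker's COPY/ADD file listing to docker's own matcher, so .dockerignore-style patterns, including "!" exceptions, behave the same way as in docker build. A minimal usage sketch (patterns and paths are examples only):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/fileutils"
    )

    func main() {
        // Later patterns win: "docs" excludes the directory, the "!" entry re-includes one file.
        patterns := []string{"docs", "!docs/README.md"}

        for _, f := range []string{"docs/guide.md", "docs/README.md"} {
            skip, err := fileutils.Matches(f, patterns)
            if err != nil {
                panic(err)
            }
            fmt.Printf("%-16s skipped=%v\n", f, skip)
        }
    }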
+func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 00000000..d5c3abf5 --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 00000000..5ec21cac --- /dev/null +++ b/vendor/src/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} From 188ae1af9c8f3e804e487b5682b882d1fd71eda6 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Fri, 25 Sep 2015 10:11:10 +0300 Subject: [PATCH 063/131] COPY handle excludes correctly --- src/rocker/build2/copy.go | 81 ++++++++++++-- src/rocker/build2/copy_test.go | 192 +++++++++++++++++++++++++++++++-- 2 files changed, 253 insertions(+), 20 deletions(-) diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index f86fa22c..406b0ce0 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -26,9 +26,9 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/units" - "github.com/fsouza/go-dockerclient/vendor/github.com/docker/docker/pkg/fileutils" "github.com/kr/pretty" log "github.com/Sirupsen/logrus" @@ -202,6 +202,14 @@ func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, erro // TODO: support urls // TODO: support local archives (and maybe a remote archives as well) + excludes, patDirs, exceptions, err := fileutils.CleanPatterns(excludes) + if err != nil { + return nil, err + } + + // TODO: here we remove some exclude patterns, how about patDirs? + excludes, nestedPatterns := findNestedPatterns(excludes) + for _, pattern := range includes { matches, err := filepath.Glob(filepath.Join(srcPath, pattern)) @@ -226,27 +234,41 @@ func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, erro return err } - // TODO: ensure explicit include does not get excluded by the following rule // TODO: ensure ignoring works correctly, maybe improve .dockerignore to work more like .gitignore? - skip, err := fileutils.Matches(relFilePath, excludes) - if err != nil { - return err + skip := false + skipNested := false + + // Here we want to keep files that are specified explicitly in the includes, + // no matter what. For example, .dockerignore can have some wildcard items + // specified, by in COPY we want explicitly add a file, that could be ignored + // otherwise using a wildcard or directory COPY + if pattern != relFilePath { + if skip, err = fileutils.OptimizedMatches(relFilePath, excludes, patDirs); err != nil { + return err + } + if skipNested, err = matchNested(relFilePath, nestedPatterns); err != nil { + return err + } } - if skip { + + if skip || skipNested { + if !exceptions && info.IsDir() { + return filepath.SkipDir + } return nil } // TODO: read links? 
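The exclude handling introduced above also splits any pattern containing "**/" into a path prefix and a basename pattern; the findNestedPatterns and matchNested helpers added later in this patch implement that. A self-contained sketch of how such a pattern is evaluated (paths are examples only):

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    func main() {
        pattern := "**/test2.txt" // a .dockerignore-style nested exclude
        i := strings.Index(pattern, "**/")
        prefix, base := pattern[:i], pattern[i+3:]

        for _, p := range []string{"a/test1.txt", "c/d/e/test2.txt"} {
            excluded := false
            if strings.HasPrefix(p, prefix) {
                // only the file's base name is matched against the pattern's tail
                excluded, _ = filepath.Match(base, filepath.Base(p))
            }
            fmt.Printf("%-18s excluded=%v\n", p, excluded)
        }
    }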
- // skip checking if symlinks point to non-existing file - // also skip named pipes, because they hanging on open - if info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + // not interested in dirs, since we walk already + if info.IsDir() { return nil } - // not interested in dirs, since we walk already - if info.IsDir() { + // skip checking if symlinks point to non-existing file + // also skip named pipes, because they hanging on open + if info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { return nil } @@ -313,3 +335,40 @@ func commonPrefix(a, b string) (prefix string) { } return } + +type nestedPattern struct { + prefix string + pattern string +} + +func (p nestedPattern) Match(path string) (bool, error) { + if !strings.HasPrefix(path, p.prefix) { + return false, nil + } + return filepath.Match(p.pattern, filepath.Base(path)) +} + +func matchNested(path string, patterns []nestedPattern) (bool, error) { + for _, p := range patterns { + if m, err := p.Match(path); err != nil || m { + return m, err + } + } + return false, nil +} + +func findNestedPatterns(excludes []string) (newExcludes []string, nested []nestedPattern) { + newExcludes = []string{} + nested = []nestedPattern{} + for _, e := range excludes { + i := strings.Index(e, "**/") + // keep exclude + if i < 0 { + newExcludes = append(newExcludes, e) + continue + } + // make a nested pattern + nested = append(nested, nestedPattern{e[:i], e[i+3:]}) + } + return newExcludes, nested +} diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build2/copy_test.go index b0728486..54ea2330 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build2/copy_test.go @@ -32,7 +32,7 @@ import ( "github.com/docker/docker/pkg/tarsum" ) -func TestListFiles_Basic(t *testing.T) { +func TestCopy_ListFiles_Basic(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "file1.txt": "hello", }) @@ -63,7 +63,7 @@ func TestListFiles_Basic(t *testing.T) { } } -func TestListFiles_Wildcard(t *testing.T) { +func TestCopy_ListFiles_Wildcard(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "file1.txt": "hello", "file2.txt": "hello", @@ -96,7 +96,7 @@ func TestListFiles_Wildcard(t *testing.T) { } } -func TestListFiles_Dir_Simple(t *testing.T) { +func TestCopy_ListFiles_Dir_Simple(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "dir/foo.txt": "hello", "dir/bar.txt": "hello", @@ -129,7 +129,7 @@ func TestListFiles_Dir_Simple(t *testing.T) { } } -func TestListFiles_Dir_AndFiles(t *testing.T) { +func TestCopy_ListFiles_Dir_AndFiles(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "test.txt": "hello", "dir/foo.txt": "hello", @@ -164,7 +164,7 @@ func TestListFiles_Dir_AndFiles(t *testing.T) { } } -func TestListFiles_Dir_Multi(t *testing.T) { +func TestCopy_ListFiles_Dir_Multi(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", "b/1.txt": "hello", @@ -206,7 +206,181 @@ func TestListFiles_Dir_Multi(t *testing.T) { } } -func TestMakeTarStream_Basic(t *testing.T) { +func TestCopy_ListFiles_Excludes_Basic(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*.txt", + } + excludes := []string{ + "test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := 
[][2]string{ + {tmpDir + "/test1.txt", "test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Explicit(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + "test3.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "test2.txt", + } + excludes := []string{ + "*.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/test2.txt", "test2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Exception(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "test1.txt": "hello", + "test2.txt": "hello", + "test3.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "*", + } + excludes := []string{ + "*.txt", + "!test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/test2.txt", "test2.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_Dir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test1.txt": "hello", + "b/test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{ + "b", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test1.txt", "a/test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_ListFiles_Excludes_FileInAnyDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/test1.txt": "hello", + "b/test2.txt": "hello", + "c/d/e/test2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + ".", + } + excludes := []string{ + "**/test2.txt", + } + + matches, err := listFiles(tmpDir, includes, excludes) + if err != nil { + t.Fatal(err) + } + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("matches: %# v", pretty.Formatter(matches)) + + assertions := [][2]string{ + {tmpDir + "/a/test1.txt", "a/test1.txt"}, + } + + assert.Len(t, matches, len(assertions)) + for i, a := 
range assertions { + assert.Equal(t, a[0], matches[i].src, "bad match src at index %d", i) + assert.Equal(t, a[1], matches[i].dest, "bad match dest at index %d", i) + } +} + +func TestCopy_MakeTarStream_Basic(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", "b/1.txt": "hello", @@ -243,7 +417,7 @@ func TestMakeTarStream_Basic(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } -func TestMakeTarStream_Rename(t *testing.T) { +func TestCopy_MakeTarStream_Rename(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", }) @@ -269,7 +443,7 @@ func TestMakeTarStream_Rename(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } -func TestMakeTarStream_OneFileToDir(t *testing.T) { +func TestCopy_MakeTarStream_OneFileToDir(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", }) @@ -295,7 +469,7 @@ func TestMakeTarStream_OneFileToDir(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } -func TestMakeTarStream_CurrentDir(t *testing.T) { +func TestCopy_MakeTarStream_CurrentDir(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", "b/1.txt": "hello", From bbc80777a2dac8e3c7298aa187ee9a4b54025681 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Fri, 25 Sep 2015 10:35:11 +0300 Subject: [PATCH 064/131] read .dockerignore --- src/cmd/rocker/main.go | 33 ++++++++----- src/rocker/build2/build.go | 29 +++++++---- src/rocker/build2/commands.go | 2 +- src/rocker/build2/copy.go | 12 ++--- src/rocker/build2/dockerignore.go | 68 ++++++++++++++++++++++++++ src/rocker/build2/dockerignore_test.go | 55 +++++++++++++++++++++ 6 files changed, 169 insertions(+), 30 deletions(-) create mode 100644 src/rocker/build2/dockerignore.go create mode 100644 src/rocker/build2/dockerignore_test.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 5f404d2c..2f967dce 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "os" - "path" "path/filepath" "strings" @@ -216,7 +215,7 @@ func buildCommand(c *cli.Context) { if configFilename == "-" { - rockerfile, err = build2.NewRockerfile(path.Base(wd), os.Stdin, vars, template.Funs{}) + rockerfile, err = build2.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{}) if err != nil { log.Fatal(err) } @@ -224,7 +223,7 @@ func buildCommand(c *cli.Context) { } else { if !filepath.IsAbs(configFilename) { - configFilename = path.Join(wd, configFilename) + configFilename = filepath.Join(wd, configFilename) } rockerfile, err = build2.NewRockerfileFromFile(configFilename, vars, template.Funs{}) @@ -235,7 +234,7 @@ func buildCommand(c *cli.Context) { if len(args) > 0 { contextDir = args[0] if !filepath.IsAbs(contextDir) { - contextDir = path.Join(wd, args[0]) + contextDir = filepath.Join(wd, args[0]) } } } @@ -245,6 +244,15 @@ func buildCommand(c *cli.Context) { os.Exit(0) } + dockerignore := []string{} + + dockerignoreFilename := filepath.Join(contextDir, ".dockerignore") + if _, err := os.Stat(dockerignoreFilename); err == nil { + if dockerignore, err = build2.ReadDockerignoreFile(dockerignoreFilename); err != nil { + log.Fatal(err) + } + } + dockerClient, err := dockerclient.NewFromCli(c) if err != nil { log.Fatal(err) @@ -261,14 +269,15 @@ func buildCommand(c *cli.Context) { client := build2.NewDockerClient(dockerClient, auth) builder := build2.New(client, rockerfile, build2.Config{ - InStream: os.Stdin, - OutStream: os.Stdout, - ContextDir: 
contextDir, - Pull: c.Bool("pull"), - NoGarbage: c.Bool("no-garbage"), - Attach: c.Bool("attach"), - Verbose: c.GlobalBool("verbose"), - ID: c.String("id"), + InStream: os.Stdin, + OutStream: os.Stdout, + ContextDir: contextDir, + Dockerignore: dockerignore, + Pull: c.Bool("pull"), + NoGarbage: c.Bool("no-garbage"), + Attach: c.Bool("attach"), + Verbose: c.GlobalBool("verbose"), + ID: c.String("id"), }) plan, err := build2.NewPlan(rockerfile.Commands(), true) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index f1ff04e4..690452f5 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -35,14 +35,15 @@ var ( ) type Config struct { - OutStream io.Writer - InStream io.ReadCloser - ContextDir string - ID string - Pull bool - NoGarbage bool - Attach bool - Verbose bool + OutStream io.Writer + InStream io.ReadCloser + ContextDir string + ID string + Dockerignore []string + Pull bool + NoGarbage bool + Attach bool + Verbose bool } type State struct { @@ -55,6 +56,7 @@ type State struct { ProducedImage bool CmdSet bool InjectCommands []string + Dockerignore []string } type Build struct { @@ -65,11 +67,18 @@ type Build struct { } func New(client Client, rockerfile *Rockerfile, cfg Config) *Build { - return &Build{ + b := &Build{ rockerfile: rockerfile, cfg: cfg, client: client, - state: State{}, + } + b.state = b.NewState() + return b +} + +func (b *Build) NewState() State { + return State{ + Dockerignore: b.cfg.Dockerignore, } } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index bf2a65a6..4fb392e8 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -216,7 +216,7 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { // Cleanup state dirtyState := s - s = State{} + s = b.NewState() // Keep some stuff between froms s.ExportsID = dirtyState.ExportsID diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index 406b0ce0..cc3ea38f 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -59,13 +59,11 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { } var ( - tarSum tarsum.TarSum - src = args[0 : len(args)-1] - dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest - u *upload - - // TODO: read .dockerignore - excludes = []string{} + tarSum tarsum.TarSum + src = args[0 : len(args)-1] + dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest + u *upload + excludes = s.Dockerignore ) // If destination is not a directory (no leading slash) diff --git a/src/rocker/build2/dockerignore.go b/src/rocker/build2/dockerignore.go new file mode 100644 index 00000000..c29f2046 --- /dev/null +++ b/src/rocker/build2/dockerignore.go @@ -0,0 +1,68 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "bufio" + "io" + "os" + "path/filepath" + "regexp" + "strings" +) + +// TODO: maybe move some stuff from copy.go here + +var ( + DockerignoreCommendRegexp = regexp.MustCompile("\\s*#.*") +) + +func ReadDockerignoreFile(file string) ([]string, error) { + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + + return ReadDockerignore(fd) +} + +func ReadDockerignore(r io.Reader) ([]string, error) { + var ( + scanner = bufio.NewScanner(r) + result = []string{} + ) + + for scanner.Scan() { + // Strip comments + line := scanner.Text() + line = DockerignoreCommendRegexp.ReplaceAllString(line, "") + // Eliminate leading and trailing whitespace. + pattern := strings.TrimSpace(line) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + result = append(result, pattern) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return result, nil +} diff --git a/src/rocker/build2/dockerignore_test.go b/src/rocker/build2/dockerignore_test.go new file mode 100644 index 00000000..c673c0f3 --- /dev/null +++ b/src/rocker/build2/dockerignore_test.go @@ -0,0 +1,55 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDockerignore_Read(t *testing.T) { + content := ` + # commend +README.md +**/*.o +!result.o + +# Some comment + .idea +.git + +a/b/../c # inline commend +` + + result, err := ReadDockerignore(strings.NewReader(content)) + if err != nil { + t.Fatal(err) + } + + expected := []string{ + "README.md", + "**/*.o", + "!result.o", + ".idea", + ".git", + "a/c", + } + + assert.Equal(t, expected, result) +} From c79538b3ebdf9e0093f12059bff29f80ddbfda9a Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Fri, 25 Sep 2015 10:55:49 +0300 Subject: [PATCH 065/131] calculate and report image's final size --- src/cmd/rocker/main.go | 8 +++++++- src/rocker/build2/build.go | 3 +++ src/rocker/build2/build_test.go | 4 ++-- src/rocker/build2/client.go | 10 +++++----- src/rocker/build2/commands.go | 11 ++++++++++- src/rocker/build2/commands_test.go | 6 ++++-- 6 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 2f967dce..9227ab77 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -30,6 +30,7 @@ import ( "rocker/template" "github.com/codegangsta/cli" + "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" log "github.com/Sirupsen/logrus" @@ -289,7 +290,12 @@ func buildCommand(c *cli.Context) { log.Fatal(err) } - log.Infof("Successfully built %.12s", builder.GetImageID()) + size := fmt.Sprintf("final size %s (+%s from the base image)", + units.HumanSize(float64(builder.VirtualSize)), + units.HumanSize(float64(builder.ProducedSize)), + ) + + log.Infof("Successfully built %.12s | %s", builder.GetImageID(), size) // builder := build.Builder{ // Rockerfile: 
configFilename, diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 690452f5..51be4c6a 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -60,6 +60,9 @@ type State struct { } type Build struct { + ProducedSize int64 + VirtualSize int64 + rockerfile *Rockerfile cfg Config client Client diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index c2226f19..a2d766bc 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -89,9 +89,9 @@ func (m *MockClient) RunContainer(containerID string, attach bool) error { return args.Error(0) } -func (m *MockClient) CommitContainer(state State, message string) (string, error) { +func (m *MockClient) CommitContainer(state State, message string) (*docker.Image, error) { args := m.Called(state, message) - return args.String(0), args.Error(1) + return args.Get(0).(*docker.Image), args.Error(1) } func (m *MockClient) RemoveContainer(containerID string) error { diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index 226c1790..efd8d36d 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -44,7 +44,7 @@ type Client interface { EnsureImage(imageName string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attachStdin bool) error - CommitContainer(state State, message string) (imageID string, err error) + CommitContainer(state State, message string) (img *docker.Image, err error) RemoveContainer(containerID string) error UploadToContainer(containerID string, stream io.Reader, path string) error EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) @@ -274,7 +274,7 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error return nil } -func (c *DockerClient) CommitContainer(s State, message string) (string, error) { +func (c *DockerClient) CommitContainer(s State, message string) (*docker.Image, error) { commitOpts := docker.CommitContainerOptions{ Container: s.ContainerID, Message: message, @@ -285,14 +285,14 @@ func (c *DockerClient) CommitContainer(s State, message string) (string, error) image, err := c.client.CommitContainer(commitOpts) if err != nil { - return "", err + return nil, err } // Inspect the image to get the real size log.Debugf("Inspect image %s", image.ID) if image, err = c.client.InspectImage(image.ID); err != nil { - return "", err + return nil, err } size := fmt.Sprintf("%s (+%s)", @@ -304,7 +304,7 @@ func (c *DockerClient) CommitContainer(s State, message string) (string, error) "size": size, }).Infof("| Result image is %.12s", image.ID) - return image.ID, nil + return image, nil } func (c *DockerClient) RemoveContainer(containerID string) error { diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 4fb392e8..5979d525 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -162,6 +162,9 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { s.ImageID = img.ID s.Config = *img.Config + b.ProducedSize = 0 + b.VirtualSize = img.VirtualSize + // If we don't have OnBuild triggers, then we are done if len(s.Config.OnBuild) == 0 { return s, nil @@ -275,12 +278,18 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { } }(s.ContainerID) - if s.ImageID, err = b.client.CommitContainer(s, message); err != nil { + var img *docker.Image + if img, err = b.client.CommitContainer(s, message); err != nil { return s, err 
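For context on the size reporting in this patch: CommitContainer now returns the full docker.Image, FROM resets ProducedSize and seeds VirtualSize with the base image's size, and each commit just below adds img.Size to ProducedSize, so the final log line can show both the total image size and the bytes added on top of the base. A rough standalone illustration of that line's formatting, with invented numbers (units.HumanSize comes from docker/pkg/units, as used in main.go above):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/units"
    )

    func main() {
        // Invented example values: roughly an 812 MB final image, 54 MB of it added by this build.
        var virtualSize, producedSize int64 = 812236800, 54525952

        fmt.Printf("final size %s (+%s from the base image)\n",
            units.HumanSize(float64(virtualSize)),
            units.HumanSize(float64(producedSize)))
    }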
} + s.ImageID = img.ID s.ProducedImage = true + // Store some stuff to the build + b.ProducedSize += img.Size + b.VirtualSize = img.VirtualSize + return s, nil } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 4d676e43..04b3dd8b 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -161,10 +161,11 @@ func TestCommandCommit_Simple(t *testing.T) { cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} + resultImage := &docker.Image{ID: "789"} b.state.ContainerID = "456" b.state.CommitMsg = origCommitMsg - c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() c.On("RemoveContainer", "456").Return(nil).Once() state, err := cmd.Execute(b) @@ -185,6 +186,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { cmd := &CommandCommit{} origCommitMsg := []string{"a", "b"} + resultImage := &docker.Image{ID: "789"} b.state.CommitMsg = origCommitMsg c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { @@ -192,7 +194,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { assert.Equal(t, []string{"/bin/sh", "-c", "#(nop) a; b"}, arg.Config.Cmd) }).Once() - c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return("789", nil).Once() + c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() c.On("RemoveContainer", "456").Return(nil).Once() state, err := cmd.Execute(b) From 3d3e7bd7737d32ffc008554808a3c53f3e74cd91 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 26 Sep 2015 12:09:46 +0300 Subject: [PATCH 066/131] improve COPY --- src/rocker/build2/copy.go | 38 +++++--- src/rocker/build2/copy_test.go | 164 ++++++++++++++++++++++++++++++++- 2 files changed, 188 insertions(+), 14 deletions(-) diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index cc3ea38f..ab5f3faa 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -45,9 +45,10 @@ type upload struct { } type uploadFile struct { - src string - dest string - size int64 + src string + dest string + relDest string + size int64 } func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { @@ -143,15 +144,27 @@ func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) ( return u, nil } - // If destination is not a directory (no leading slash) - if !strings.HasSuffix(u.dest, sep) { - // If we transfer a single file and the destination is not a directory, - // then rename it and remove prefix - if len(u.files) == 1 { + // If we transfer a single item and the destination is not a directory (no leading slash) + if !strings.HasSuffix(u.dest, sep) && len(includes) == 1 { + item := filepath.Clean(includes[0]) + // If we've got a single file that was explicitly pointed in the source item + // we need to replace its name with the destination + // e.g. COPY src/foo.txt /app/bar.txt + if len(u.files) == 1 && filepath.Clean(u.files[0].relDest) == item { u.files[0].dest = strings.TrimLeft(u.dest, sep) u.dest = "" + } else if !containsWildcards(item) { + // The source item is a directory but not a wildcard, so we need to rename only + // the first bit e.g. 
COPY foo /src + for i := range u.files { + relDest, err := filepath.Rel(item, u.files[i].dest) + if err != nil { + return u, err + } + u.files[i].dest = relDest + } + u.dest += sep } else { - // add leading slash for more then one file u.dest += sep } } @@ -288,9 +301,10 @@ func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, erro } result = append(result, &uploadFile{ - src: path, - dest: resultFilePath, - size: info.Size(), + src: path, + dest: resultFilePath, + relDest: relFilePath, + size: info.Size(), }) return nil diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build2/copy_test.go index 54ea2330..e469aa84 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build2/copy_test.go @@ -399,6 +399,10 @@ func TestCopy_MakeTarStream_Basic(t *testing.T) { excludes := []string{} dest := "/" + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) if err != nil { t.Fatal(err) @@ -417,18 +421,22 @@ func TestCopy_MakeTarStream_Basic(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } -func TestCopy_MakeTarStream_Rename(t *testing.T) { +func TestCopy_MakeTarStream_FileRename(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "a/test.txt": "hello", }) defer os.RemoveAll(tmpDir) includes := []string{ - "a/test.txt", + "./a/test.txt", } excludes := []string{} dest := "/src/x.txt" + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) if err != nil { t.Fatal(err) @@ -455,6 +463,10 @@ func TestCopy_MakeTarStream_OneFileToDir(t *testing.T) { excludes := []string{} dest := "/src/" + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) if err != nil { t.Fatal(err) @@ -486,6 +498,10 @@ func TestCopy_MakeTarStream_CurrentDir(t *testing.T) { excludes := []string{} dest := "/go/app/src" + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) if err != nil { t.Fatal(err) @@ -505,6 +521,150 @@ func TestCopy_MakeTarStream_CurrentDir(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +func TestCopy_MakeTarStream_DirRename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "./c", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + "src/x/1.txt", + "src/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func 
TestCopy_MakeTarStream_DirRenameLeadingSlash(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "./c/", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + "src/x/1.txt", + "src/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_DirRenameWildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + "c/x/1.txt": "hello", + "c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "*", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/c/foo.txt", + "src/c/x/1.txt", + "src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + +func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "c/foo.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // ADD ./c /src --> /src + // ADD ./a/b[/1,2] /src -> /src[/1,2] + + includes := []string{ + "./c", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/foo.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + // helper functions func makeTmpDir(t *testing.T, files map[string]string) string { From 8b988280f562266331d143ceca013b3e00a6c89d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 26 Sep 2015 12:10:01 +0300 Subject: [PATCH 067/131] print separator before subsequent FROM --- src/rocker/build2/commands.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 5979d525..3daea982 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -227,6 +227,8 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { // For final cleanup we want to keep imageID if c.final { s.ImageID = dirtyState.ImageID + } else { + log.Infof("====================================") } return s, nil From c841c36557e2d3c5d6043ee78827aaaec0aabb40 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 26 Sep 2015 12:10:24 +0300 Subject: [PATCH 068/131] main: log fatal if fail to read Rockerfile --- src/cmd/rocker/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cmd/rocker/main.go 
b/src/cmd/rocker/main.go index 9227ab77..bec5b9dd 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -228,6 +228,9 @@ func buildCommand(c *cli.Context) { } rockerfile, err = build2.NewRockerfileFromFile(configFilename, vars, template.Funs{}) + if err != nil { + log.Fatal(err) + } // Initialize context dir contextDir = filepath.Dir(configFilename) From 22226abdb7846126c2f8fc30e389aa76adb13c9e Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 26 Sep 2015 12:10:30 +0300 Subject: [PATCH 069/131] v1 TODO --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index cd10514e..fd77423c 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,12 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are m # *NOTE on v1 branch* In this branch we are developing the new experimental implementation of Rocker that will be completely client-side driven, with no fallback on `docker build`. This means faster builds and more power. No build context uploads anymore. Also, the builder code is completely rewritten and made much more testable and extensible in the future. Caching might be also rethought. Cross-server builds determinism is our dream. +### v1 TODO + +- [ ] Cache +- [ ] FROM scratch +- [ ] ADD urls and local archives + --- * [Installation](#installation) From 41a2f5472da771cc41394da97868a76356c69261 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sat, 26 Sep 2015 15:11:54 +0300 Subject: [PATCH 070/131] cache! --- src/cmd/rocker/main.go | 10 +- src/rocker/build2/build.go | 98 +++++++----- src/rocker/build2/build_test.go | 14 +- src/rocker/build2/cache.go | 91 ++++++++++++ src/rocker/build2/cache_test.go | 68 +++++++++ src/rocker/build2/commands.go | 229 +++++++++++++++++++++++------ src/rocker/build2/commands_test.go | 29 ++-- src/rocker/build2/compare.go | 94 ++++++++++++ src/rocker/build2/copy.go | 10 +- src/rocker/build2/plan.go | 6 +- src/rocker/build2/state.go | 60 ++++++++ 11 files changed, 606 insertions(+), 103 deletions(-) create mode 100644 src/rocker/build2/cache.go create mode 100644 src/rocker/build2/cache_test.go create mode 100644 src/rocker/build2/compare.go create mode 100644 src/rocker/build2/state.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index bec5b9dd..5c7f4665 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -272,7 +272,13 @@ func buildCommand(c *cli.Context) { client := build2.NewDockerClient(dockerClient, auth) - builder := build2.New(client, rockerfile, build2.Config{ + var cache build2.Cache + if !c.Bool("no-cache") { + // TODO: configurable cache dir + cache = build2.NewCacheFS(os.Getenv("HOME") + "/.rocker_cache") + } + + builder := build2.New(client, rockerfile, cache, build2.Config{ InStream: os.Stdin, OutStream: os.Stdout, ContextDir: contextDir, @@ -282,6 +288,8 @@ func buildCommand(c *cli.Context) { Attach: c.Bool("attach"), Verbose: c.GlobalBool("verbose"), ID: c.String("id"), + NoCache: c.Bool("no-cache"), + Push: c.Bool("push"), }) plan, err := build2.NewPlan(rockerfile.Commands(), true) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 51be4c6a..18ff30e8 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -17,8 +17,10 @@ package build2 import ( + "fmt" "io" + "github.com/docker/docker/pkg/units" "github.com/fatih/color" "github.com/fsouza/go-dockerclient" @@ -44,53 +46,48 @@ type Config struct { NoGarbage bool Attach bool Verbose bool -} - -type State struct { - Config docker.Config - 
HostConfig docker.HostConfig - ImageID string - ContainerID string - ExportsID string - CommitMsg []string - ProducedImage bool - CmdSet bool - InjectCommands []string - Dockerignore []string + NoCache bool + Push bool } type Build struct { ProducedSize int64 VirtualSize int64 - rockerfile *Rockerfile - cfg Config - client Client - state State + rockerfile *Rockerfile + cache Cache + cfg Config + client Client + state State + cacheBusted bool } -func New(client Client, rockerfile *Rockerfile, cfg Config) *Build { +func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build { b := &Build{ rockerfile: rockerfile, + cache: cache, cfg: cfg, client: client, } - b.state = b.NewState() + b.state = NewState(b) return b } -func (b *Build) NewState() State { - return State{ - Dockerignore: b.cfg.Dockerignore, - } -} - func (b *Build) Run(plan Plan) (err error) { for k := 0; k < len(plan); k++ { c := plan[k] log.Debugf("Step %d: %# v", k+1, pretty.Formatter(c)) + + var doRun bool + if doRun, err = c.ShouldRun(b); err != nil { + return err + } + if !doRun { + continue + } + log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c)) if b.state, err = c.Execute(b); err != nil { @@ -130,6 +127,49 @@ func (b *Build) GetImageID() string { return b.state.ImageID } +func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { + if b.cache == nil || b.cacheBusted { + return s, false, nil + } + + var s2 *State + if s2, err = b.cache.Get(s); err != nil { + return s, false, err + } + if s2 == nil { + b.cacheBusted = true + log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached")) + return s, false, nil + } + + var img *docker.Image + if img, err = b.client.InspectImage(s2.ImageID); err != nil { + return s, true, err + } + if img == nil { + log.Warnf("Cannot find the cached image %.12s, consider cleaning the cache", s2.ImageID) + return s, false, nil + } + + size := fmt.Sprintf("%s (+%s)", + units.HumanSize(float64(img.VirtualSize)), + units.HumanSize(float64(img.Size)), + ) + + log.WithFields(log.Fields{ + "size": size, + }).Infof(color.New(color.FgGreen).SprintfFunc()("| Take image %.12s from cache", s2.ImageID)) + + // Store some stuff to the build + b.ProducedSize += img.Size + b.VirtualSize = img.VirtualSize + + // TODO: maybe move somewhere + s2.Commits = []string{} + + return *s2, true, nil +} + func (b *Build) getVolumeContainer(path string) (name string, err error) { name = b.mountsContainerName(path) @@ -174,11 +214,3 @@ func (b *Build) getExportsContainer() (name string, err error) { return containerID, nil } - -func (s *State) Commit(msg string) { - s.CommitMsg = append(s.CommitMsg, msg) -} - -func (s *State) SkipCommit() { - s.Commit(COMMIT_SKIP) -} diff --git a/src/rocker/build2/build_test.go b/src/rocker/build2/build_test.go index a2d766bc..aab0d4c3 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build2/build_test.go @@ -45,7 +45,7 @@ func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *Moc } c := &MockClient{} - b := New(c, r, cfg) + b := New(c, r, nil, cfg) return b, c } @@ -118,3 +118,15 @@ func (m *MockClient) EnsureContainer(containerName string, config *docker.Config args := m.Called(containerName, config, purpose) return args.String(0), args.Error(1) } + +// type MockCache struct { +// mock.Mock +// } + +// func (m *MockCache) Get(s State) (s2 *State, err error) { + +// } + +// func (m *MockCache) Put(s State) error { + +// } diff --git a/src/rocker/build2/cache.go b/src/rocker/build2/cache.go new 
file mode 100644 index 00000000..116b6014 --- /dev/null +++ b/src/rocker/build2/cache.go @@ -0,0 +1,91 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + log "github.com/Sirupsen/logrus" +) + +type Cache interface { + Get(s State) (s2 *State, err error) + Put(s State) error +} + +type CacheFS struct { + root string +} + +func NewCacheFS(root string) *CacheFS { + return &CacheFS{ + root: root, + } +} + +func (c *CacheFS) Get(s State) (res *State, err error) { + match := filepath.Join(c.root, s.ImageID) + + err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error { + if err != nil && os.IsNotExist(err) { + return nil + } + if info.IsDir() { + return nil + } + + s2 := State{} + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + if err := json.Unmarshal(data, &s2); err != nil { + return err + } + + log.Debugf("CACHE COMPARE %s %s %q %q", s.ImageID, s2.ImageID, s.Commits, s2.Commits) + + if s.Equals(s2) { + res = &s2 + return filepath.SkipDir + } + return nil + }) + + if err == filepath.SkipDir { + return res, nil + } + + return +} + +func (c *CacheFS) Put(s State) error { + log.Debugf("CACHE PUT %s %s %q", s.ParentID, s.ImageID, s.Commits) + + fileName := filepath.Join(c.root, s.ParentID, s.ImageID) + ".json" + if err := os.MkdirAll(filepath.Dir(fileName), 0755); err != nil { + return err + } + data, err := json.Marshal(s) + if err != nil { + return err + } + return ioutil.WriteFile(fileName, data, 0644) +} diff --git a/src/rocker/build2/cache_test.go b/src/rocker/build2/cache_test.go new file mode 100644 index 00000000..0844cdd0 --- /dev/null +++ b/src/rocker/build2/cache_test.go @@ -0,0 +1,68 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package build2 + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCache_Basic(t *testing.T) { + tmpDir := cacheTestTmpDir(t) + defer os.RemoveAll(tmpDir) + + c := NewCacheFS(tmpDir) + + s := State{ + ParentID: "123", + ImageID: "456", + } + if err := c.Put(s); err != nil { + t.Fatal(err) + } + + s2 := State{ + ImageID: "123", + } + res, err := c.Get(s2) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "456", res.ImageID) + + s3 := State{ + ImageID: "789", + } + res2, err := c.Get(s3) + if err != nil { + t.Fatal(err) + } + + assert.Nil(t, res2) +} + +func cacheTestTmpDir(t *testing.T) string { + tmpDir, err := ioutil.TempDir("", "rocker-cache-test") + if err != nil { + t.Fatal(err) + } + return tmpDir +} diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 3daea982..13552174 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -52,6 +52,9 @@ type Command interface { // and passed to the external command implementations. Execute(b *Build) (State, error) + // Returns true if the command should be executed + ShouldRun(b *Build) (bool, error) + // String returns the human readable string representation of the command String() string } @@ -119,6 +122,10 @@ func (c *CommandFrom) String() string { return c.cfg.original } +func (c *CommandFrom) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandFrom) Execute(b *Build) (s State, err error) { // TODO: for "scratch" image we may use /images/create @@ -188,6 +195,10 @@ func (c *CommandMaintainer) String() string { return c.cfg.original } +func (c *CommandMaintainer) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandMaintainer) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") @@ -208,6 +219,10 @@ func (c *CommandCleanup) String() string { return "Cleaning up" } +func (c *CommandCleanup) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandCleanup) Execute(b *Build) (State, error) { s := b.state @@ -219,7 +234,7 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { // Cleanup state dirtyState := s - s = b.NewState() + s = NewState(b) // Keep some stuff between froms s.ExportsID = dirtyState.ExportsID @@ -241,30 +256,33 @@ func (c *CommandCommit) String() string { return "Commit changes" } +func (c *CommandCommit) ShouldRun(b *Build) (bool, error) { + return b.state.GetCommits() != "", nil +} + func (c *CommandCommit) Execute(b *Build) (s State, err error) { s = b.state - // Collect commits that are not skipped - commits := []string{} - for _, msg := range s.CommitMsg { - if msg != COMMIT_SKIP { - commits = append(commits, msg) - } + commits := s.GetCommits() + if commits == "" { + return s, nil } - // Reset collected commit messages after the commit - s.CommitMsg = []string{} + // TODO: ? 
+ // if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") - if len(commits) == 0 && s.ContainerID == "" { - log.Infof("| Skip") + // Check cache + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { return s, nil } - message := strings.Join(commits, "; ") - if s.ContainerID == "" { origCmd := s.Config.Cmd - s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} if s.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err @@ -274,20 +292,26 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { } defer func(id string) { - s.ContainerID = "" + s.Commits = []string{} if err = b.client.RemoveContainer(id); err != nil { log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) } }(s.ContainerID) var img *docker.Image - if img, err = b.client.CommitContainer(s, message); err != nil { + if img, err = b.client.CommitContainer(s, commits); err != nil { return s, err } + s.ContainerID = "" + s.ParentID = s.ImageID s.ImageID = img.ID s.ProducedImage = true + if b.cache != nil { + b.cache.Put(s) + } + // Store some stuff to the build b.ProducedSize += img.Size b.VirtualSize = img.VirtualSize @@ -304,6 +328,10 @@ func (c *CommandRun) String() string { return c.cfg.original } +func (c *CommandRun) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandRun) Execute(b *Build) (s State, err error) { s = b.state @@ -317,6 +345,17 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { cmd = append([]string{"/bin/sh", "-c"}, cmd...) } + s.Commit("RUN %q", cmd) + + // Check cache + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } + // TODO: test with ENTRYPOINT // We run this command in the container using CMD @@ -347,13 +386,18 @@ func (c *CommandAttach) String() string { return c.cfg.original } +func (c *CommandAttach) ShouldRun(b *Build) (bool, error) { + // TODO: skip attach? + return true, nil +} + func (c *CommandAttach) Execute(b *Build) (s State, err error) { s = b.state // simply ignore this command if we don't wanna attach if !b.cfg.Attach { log.Infof("Skip ATTACH; use --attach option to get inside") - s.SkipCommit() + // s.SkipCommit() return s, nil } @@ -369,6 +413,8 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
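// Illustrative sketch (assumed example, not taken from the patch): a shell-form
// command such as `ATTACH npm test` would be wrapped here into roughly
// []string{"/bin/sh", "-c", "npm test"}, while a JSON/exec form like
// `ATTACH ["npm", "test"]` is passed through unchanged.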
} + // TODO: do s.commit unique + // We run this command in the container using CMD // Backup the config so we can restore it later @@ -407,6 +453,10 @@ func (c *CommandEnv) String() string { return c.cfg.original } +func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandEnv) Execute(b *Build) (s State, err error) { s = b.state @@ -457,6 +507,10 @@ func (c *CommandLabel) String() string { return c.cfg.original } +func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandLabel) Execute(b *Build) (s State, err error) { s = b.state @@ -501,6 +555,10 @@ func (c *CommandWorkdir) String() string { return c.cfg.original } +func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { s = b.state @@ -532,6 +590,10 @@ func (c *CommandCmd) String() string { return c.cfg.original } +func (c *CommandCmd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandCmd) Execute(b *Build) (s State, err error) { s = b.state @@ -561,6 +623,10 @@ func (c *CommandEntrypoint) String() string { return c.cfg.original } +func (c *CommandEntrypoint) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { s = b.state @@ -599,6 +665,10 @@ func (c *CommandExpose) String() string { return c.cfg.original } +func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandExpose) Execute(b *Build) (s State, err error) { s = b.state @@ -646,6 +716,10 @@ func (c *CommandVolume) String() string { return c.cfg.original } +func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandVolume) Execute(b *Build) (s State, err error) { s = b.state @@ -679,6 +753,10 @@ func (c *CommandUser) String() string { return c.cfg.original } +func (c *CommandUser) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandUser) Execute(b *Build) (s State, err error) { s = b.state @@ -703,6 +781,10 @@ func (c *CommandOnbuild) String() string { return c.cfg.original } +func (c *CommandOnbuild) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { s = b.state @@ -736,6 +818,10 @@ func (c *CommandTag) String() string { return c.cfg.original } +func (c *CommandTag) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandTag) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("TAG requires exactly one argument") @@ -761,6 +847,10 @@ func (c *CommandPush) String() string { return c.cfg.original } +func (c *CommandPush) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandPush) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("PUSH requires exactly one argument") @@ -774,6 +864,11 @@ func (c *CommandPush) Execute(b *Build) (State, error) { return b.state, err } + if !b.cfg.Push { + log.Infof("| Don't push. 
Pass --push flag to actually push to the registry") + return b.state, nil + } + if err := b.client.PushImage(c.cfg.args[0]); err != nil { return b.state, err } @@ -790,6 +885,10 @@ func (c *CommandCopy) String() string { return c.cfg.original } +func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandCopy) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { return b.state, fmt.Errorf("COPY requires at least two arguments") @@ -807,6 +906,10 @@ func (c *CommandAdd) String() string { return c.cfg.original } +func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandAdd) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { return b.state, fmt.Errorf("ADD requires at least two arguments") @@ -823,6 +926,10 @@ func (c *CommandMount) String() string { return c.cfg.original } +func (c *CommandMount) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandMount) Execute(b *Build) (s State, err error) { s = b.state @@ -894,6 +1001,10 @@ func (c *CommandExport) String() string { return c.cfg.original } +func (c *CommandExport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandExport) Execute(b *Build) (s State, err error) { s = b.state @@ -909,6 +1020,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { args = []string{args[0], "/"} } + src := args[0 : len(args)-1] dest := args[len(args)-1] // last one is always the dest // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir @@ -917,11 +1029,27 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir - exportsContainerName, err := b.getExportsContainer() + exportsContainerID, err := b.getExportsContainer() if err != nil { return s, err } + // build the command + cmdDestPath, err := util.ResolvePath(ExportsPath, dest) + if err != nil { + return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) + } + + s.Commit("EXPORT %q to %.12s:%s", src, exportsContainerID, dest) + + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } + // Remember original stuff so we can restore it when we finished var exportsID string origState := s @@ -932,13 +1060,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { }() // Append exports container as a volume - s.HostConfig.VolumesFrom = []string{exportsContainerName} - - // build the command - cmdDestPath, err := util.ResolvePath(ExportsPath, dest) - if err != nil { - return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) - } + s.HostConfig.VolumesFrom = []string{exportsContainerID} cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} @@ -946,17 +1068,12 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { cmd = append(cmd, "--verbose") } - cmd = append(cmd, args[0:len(args)-1]...) + cmd = append(cmd, src...) 
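// Illustrative sketch (based on the EXPORT mapping comments above): once the
// destination is appended below, an `EXPORT /my/dir my_dir` would run roughly
//   /opt/rsync/bin/rsync -a --delete-during /my/dir /EXPORT_VOLUME/my_dir
// inside the temporary container, with --verbose added only in verbose mode.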
cmd = append(cmd, cmdDestPath) s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - // For caching - // builder.addLabels(map[string]string{ - // "rocker-exportsContainerId": exportsContainerID, - // }) - if exportsID, err = b.client.CreateContainer(s); err != nil { return s, err } @@ -980,6 +1097,10 @@ func (c *CommandImport) String() string { return c.cfg.original } +func (c *CommandImport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandImport) Execute(b *Build) (s State, err error) { s = b.state args := c.cfg.args @@ -991,12 +1112,34 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") } + log.Infof("| Import from %s", b.exportsContainerName()) + // If only one argument was given to IMPORT, use the same path for destination // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar if len(args) < 2 { args = []string{args[0], "/"} } dest := args[len(args)-1] // last one is always the dest + src := []string{} + + for _, arg := range args[0 : len(args)-1] { + argResolved, err := util.ResolvePath(ExportsPath, arg) + if err != nil { + return s, fmt.Errorf("Invalid IMPORT source: %s", arg) + } + src = append(src, argResolved) + } + + s.Commit("IMPORT %.12s:%q %s", s.ExportsID, src, dest) + + // Check cache + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } // Remember original stuff so we can restore it when we finished origState := s @@ -1005,11 +1148,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { defer func() { s = origState - - if err == nil { - s.ContainerID = importID - s.Commit(fmt.Sprintf("IMPORT %s %q", s.ExportsID, args)) - } + s.ContainerID = importID }() cmd := []string{"/opt/rsync/bin/rsync", "-a"} @@ -1018,13 +1157,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { cmd = append(cmd, "--verbose") } - for _, arg := range args[0 : len(args)-1] { - argResolved, err := util.ResolvePath(ExportsPath, arg) - if err != nil { - return s, fmt.Errorf("Invalid IMPORT source: %s", arg) - } - cmd = append(cmd, argResolved) - } + cmd = append(cmd, src...) 
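// Illustrative sketch (assumed example): with the destination appended below,
// an `IMPORT /my/dir/file.tar` resolves its source against ExportsPath and runs
// roughly
//   /opt/rsync/bin/rsync -a /EXPORT_VOLUME/my/dir/file.tar /
// since the destination falls back to "/" when only one argument is given.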
cmd = append(cmd, dest) s.Config.Cmd = cmd @@ -1038,15 +1171,9 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { log.Infof("| Running in %.12s: %s", importID, strings.Join(cmd, " ")) if err = b.client.RunContainer(importID, false); err != nil { - b.client.RemoveContainer(importID) return s, err } - // For caching - // builder.addLabels(map[string]string{ - // "rocker-lastExportImageId": builder.lastExportImageID, - // }) - return s, nil } @@ -1059,6 +1186,10 @@ func (c *CommandOnbuildWrap) String() string { return "ONBUILD " + c.cmd.String() } +func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { + return true, nil +} + func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { return c.cmd.Execute(b) } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 04b3dd8b..6f8cee1f 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -160,10 +160,9 @@ func TestCommandCommit_Simple(t *testing.T) { b, c := makeBuild(t, "", Config{}) cmd := &CommandCommit{} - origCommitMsg := []string{"a", "b"} resultImage := &docker.Image{ID: "789"} b.state.ContainerID = "456" - b.state.CommitMsg = origCommitMsg + b.state.Commit("a").Commit("b") c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() c.On("RemoveContainer", "456").Return(nil).Once() @@ -174,8 +173,8 @@ func TestCommandCommit_Simple(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCommitMsg, b.state.CommitMsg) - assert.Equal(t, []string{}, state.CommitMsg) + assert.Equal(t, "a; b", b.state.GetCommits()) + assert.Equal(t, "", state.GetCommits()) assert.Equal(t, []string(nil), state.Config.Cmd) assert.Equal(t, "789", state.ImageID) assert.Equal(t, "", state.ContainerID) @@ -185,9 +184,8 @@ func TestCommandCommit_NoContainer(t *testing.T) { b, c := makeBuild(t, "", Config{}) cmd := &CommandCommit{} - origCommitMsg := []string{"a", "b"} resultImage := &docker.Image{ID: "789"} - b.state.CommitMsg = origCommitMsg + b.state.Commit("a").Commit("b") c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) @@ -203,8 +201,8 @@ func TestCommandCommit_NoContainer(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, origCommitMsg, b.state.CommitMsg) - assert.Equal(t, []string{}, state.CommitMsg) + assert.Equal(t, "a; b", b.state.GetCommits()) + assert.Equal(t, "", state.GetCommits()) assert.Equal(t, "789", state.ImageID) assert.Equal(t, "", state.ContainerID) } @@ -232,7 +230,7 @@ func TestCommandEnv_Simple(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"ENV type=web env=prod"}, state.CommitMsg) + assert.Equal(t, "ENV type=web env=prod", state.GetCommits()) assert.Equal(t, []string{"type=web", "env=prod"}, state.Config.Env) } @@ -249,7 +247,7 @@ func TestCommandEnv_Advanced(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string{"ENV type=web env=prod"}, state.CommitMsg) + assert.Equal(t, "ENV type=web env=prod", state.GetCommits()) assert.Equal(t, []string{"env=prod", "version=1.2.3", "type=web"}, state.Config.Env) } @@ -273,7 +271,7 @@ func TestCommandLabel_Simple(t *testing.T) { t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) - assert.Equal(t, []string{"LABEL type=web env=prod"}, state.CommitMsg) + assert.Equal(t, "LABEL type=web env=prod", state.GetCommits()) assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") } @@ -301,7 +299,7 @@ func 
TestCommandLabel_Advanced(t *testing.T) { t.Logf("Result labels: %# v", pretty.Formatter(state.Config.Labels)) - assert.Equal(t, []string{"LABEL type=web env=prod"}, state.CommitMsg) + assert.Equal(t, "LABEL type=web env=prod", state.GetCommits()) assert.True(t, reflect.DeepEqual(state.Config.Labels, expectedLabels), "bad result labels") } @@ -318,7 +316,7 @@ func TestCommandMaintainer_Simple(t *testing.T) { t.Fatal(err) } - assert.Len(t, state.CommitMsg, 0) + assert.Equal(t, "", state.GetCommits()) } // =========== Testing WORKDIR =========== @@ -648,6 +646,7 @@ func TestCommandPush_Simple(t *testing.T) { args: []string{"docker.io/grammarly/rocker:1.0"}, }} + b.cfg.Push = true b.state.ImageID = "123" c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() @@ -706,7 +705,7 @@ func TestCommandMount_Simple(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, []string{"/resolved/src:/dest"}, state.HostConfig.Binds) - assert.Equal(t, []string{`MOUNT ["/src:/dest"]`}, state.CommitMsg) + assert.Equal(t, `MOUNT ["/src:/dest"]`, state.GetCommits()) } func TestCommandMount_VolumeContainer(t *testing.T) { @@ -735,7 +734,7 @@ func TestCommandMount_VolumeContainer(t *testing.T) { c.AssertExpectations(t) assert.Equal(t, []string{containerName}, state.HostConfig.VolumesFrom) - assert.Equal(t, []string{commitMsg}, state.CommitMsg) + assert.Equal(t, commitMsg, state.GetCommits()) } // TODO: test Cleanup diff --git a/src/rocker/build2/compare.go b/src/rocker/build2/compare.go new file mode 100644 index 00000000..895f3326 --- /dev/null +++ b/src/rocker/build2/compare.go @@ -0,0 +1,94 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import "github.com/fsouza/go-dockerclient" + +// CompareConfigs compares two Config struct. 
Does not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func CompareConfigs(a, b docker.Config) bool { + // Experimental: do not consider rocker-data labels when comparing + if _, ok := a.Labels["rocker-data"]; ok { + tmp := a.Labels["rocker-data"] + delete(a.Labels, "rocker-data") + defer func() { a.Labels["rocker-data"] = tmp }() + } + if _, ok := b.Labels["rocker-data"]; ok { + tmp := b.Labels["rocker-data"] + delete(b.Labels, "rocker-data") + defer func() { b.Labels["rocker-data"] = tmp }() + } + + if a.OpenStdin || b.OpenStdin { + return false + } + + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.PortSpecs) != len(b.PortSpecs) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for i := 0; i < len(a.PortSpecs); i++ { + if a.PortSpecs[i] != b.PortSpecs[i] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index ab5f3faa..302b812c 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -79,7 +79,6 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { // skip COPY if no files matched if len(u.files) == 0 { log.Infof("| No files matched") - s.SkipCommit() return s, nil } @@ -98,6 +97,15 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest) s.Commit(message) + // Check cache + s, hit, err := b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } + origCmd := s.Config.Cmd s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} diff --git a/src/rocker/build2/plan.go b/src/rocker/build2/plan.go index b06101fb..ca593759 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build2/plan.go @@ -37,9 +37,9 @@ func NewPlan(commands []ConfigCommand, finalCleanup bool) (plan Plan, err error) }) } - alwaysCommitBefore := "run attach add copy tag push" - alwaysCommitAfter := "run attach add copy import" - neverCommitAfter := "from maintainer tag push export" + alwaysCommitBefore := "run attach add copy tag push export import" + alwaysCommitAfter := "run attach add copy export import" + neverCommitAfter := "from maintainer tag push" for i := 0; i < len(commands); i++ { cfg := commands[i] diff --git a/src/rocker/build2/state.go b/src/rocker/build2/state.go new file mode 100644 index 00000000..95703826 --- /dev/null +++ b/src/rocker/build2/state.go @@ -0,0 +1,60 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package build2 + +import ( + "fmt" + "sort" + "strings" + + "github.com/fsouza/go-dockerclient" +) + +type State struct { + Config docker.Config + HostConfig docker.HostConfig + ImageID string + ParentID string + ContainerID string + ExportsID string + Commits []string + ProducedImage bool + CmdSet bool + InjectCommands []string + Dockerignore []string +} + +func NewState(b *Build) State { + return State{ + Dockerignore: b.cfg.Dockerignore, + } +} + +func (s *State) Commit(msg string, args ...interface{}) *State { + s.Commits = append(s.Commits, fmt.Sprintf(msg, args...)) + sort.Strings(s.Commits) + return s +} + +func (s State) GetCommits() string { + return strings.Join(s.Commits, "; ") +} + +func (s State) Equals(s2 State) bool { + // TODO: compare other properties? + return s.GetCommits() == s2.GetCommits() +} From 72bf29535934905b8b3811599e7d6b6f7ffd4ff8 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 27 Sep 2015 11:52:47 +0300 Subject: [PATCH 071/131] cleanup dockerignore --- .dockerignore | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.dockerignore b/.dockerignore index ad4a9beb..34f27cd6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,7 @@ .git dist -.dockerignore -.rockertmp* -Rockerfile +**/.a pkg vendor/pkg -Rockerfile.exec -Rockerfile.build-cross +example +rsync From af7a9f169dfd22e5925ea8a60976dd59cb4a9c20 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 27 Sep 2015 11:52:54 +0300 Subject: [PATCH 072/131] cleanup gitignore --- .gitignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitignore b/.gitignore index 0a20ae6c..53417165 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,3 @@ dev .idea *.sublime-workspace - -src/rocker/build2/testdata/file.tar -src/rocker/build2/testdata/file.txt2 From 5baf13c2e70bbfa82862343520c67e2d8f5916ec Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Sun, 27 Sep 2015 11:54:00 +0300 Subject: [PATCH 073/131] fix EXPORT/IMPORT cache --- src/rocker/build2/build.go | 21 +++++++++++++-------- src/rocker/build2/commands.go | 21 ++++++++++++--------- src/rocker/build2/state.go | 1 + 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 18ff30e8..aaa20b6f 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -54,12 +54,16 @@ type Build struct { ProducedSize int64 VirtualSize int64 - rockerfile *Rockerfile - cache Cache - cfg Config - client Client - state State - cacheBusted bool + rockerfile *Rockerfile + cache Cache + cfg Config + client Client + state State + + // A little hack to support cross-FROM cache for EXPORTS + // maybe rethink it later + exportsID string + exportsCacheBusted bool } func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build { @@ -128,7 +132,7 @@ func (b *Build) GetImageID() string { } func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { - if b.cache == nil || b.cacheBusted { + if b.cache == nil || s.CacheBusted { return s, false, nil } @@ -137,7 +141,7 @@ func (b *Build) probeCache(s State) 
(cachedState State, hit bool, err error) { return s, false, err } if s2 == nil { - b.cacheBusted = true + s.CacheBusted = true log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached")) return s, false, nil } @@ -166,6 +170,7 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { // TODO: maybe move somewhere s2.Commits = []string{} + s2.CacheBusted = false return *s2, true, nil } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 13552174..4f1bb38b 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -1047,16 +1047,19 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { return s, err } if hit { + b.exportsID = s.ExportsID return s, nil } + // Hack to support cross-FROM cache for EXPORTS + b.exportsCacheBusted = true + // Remember original stuff so we can restore it when we finished - var exportsID string origState := s defer func() { s = origState - s.ExportsID = exportsID + s.ExportsID = b.exportsID }() // Append exports container as a volume @@ -1074,14 +1077,14 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - if exportsID, err = b.client.CreateContainer(s); err != nil { + if b.exportsID, err = b.client.CreateContainer(s); err != nil { return s, err } - defer b.client.RemoveContainer(exportsID) + defer b.client.RemoveContainer(b.exportsID) - log.Infof("| Running in %.12s: %s", exportsID, strings.Join(cmd, " ")) + log.Infof("| Running in %.12s: %s", b.exportsID, strings.Join(cmd, " ")) - if err = b.client.RunContainer(exportsID, false); err != nil { + if err = b.client.RunContainer(b.exportsID, false); err != nil { return s, err } @@ -1108,7 +1111,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { if len(args) == 0 { return s, fmt.Errorf("IMPORT requires at least one argument") } - if s.ExportsID == "" { + if b.exportsID == "" { return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") } @@ -1130,14 +1133,14 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { src = append(src, argResolved) } - s.Commit("IMPORT %.12s:%q %s", s.ExportsID, src, dest) + s.Commit("IMPORT %.12s:%q %s", b.exportsID, src, dest) // Check cache s, hit, err := b.probeCache(s) if err != nil { return s, err } - if hit { + if hit && !b.exportsCacheBusted { return s, nil } diff --git a/src/rocker/build2/state.go b/src/rocker/build2/state.go index 95703826..31d5bfed 100644 --- a/src/rocker/build2/state.go +++ b/src/rocker/build2/state.go @@ -34,6 +34,7 @@ type State struct { Commits []string ProducedImage bool CmdSet bool + CacheBusted bool InjectCommands []string Dockerignore []string } From 26cb068efd2365ea0c390eff187e1b37f33a0052 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 28 Sep 2015 11:19:49 +0300 Subject: [PATCH 074/131] improve EXPORT/IMPORT caching --- src/rocker/build2/build.go | 4 ++-- src/rocker/build2/commands.go | 33 +++++++++++++++++++++------------ 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index aaa20b6f..7b9818da 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -62,8 +62,7 @@ type Build struct { // A little hack to support cross-FROM cache for EXPORTS // maybe rethink it later - exportsID string - exportsCacheBusted bool + exports []string } func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build { @@ -72,6 +71,7 @@ func New(client 
Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build cache: cache, cfg: cfg, client: client, + exports: []string{}, } b.state = NewState(b) return b diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 4f1bb38b..918c4b8e 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -1047,19 +1047,18 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { return s, err } if hit { - b.exportsID = s.ExportsID + b.exports = append(b.exports, s.ExportsID) return s, nil } - // Hack to support cross-FROM cache for EXPORTS - b.exportsCacheBusted = true - // Remember original stuff so we can restore it when we finished + var exportsId string origState := s defer func() { s = origState - s.ExportsID = b.exportsID + s.ExportsID = exportsId + b.exports = append(b.exports, exportsId) }() // Append exports container as a volume @@ -1077,14 +1076,14 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - if b.exportsID, err = b.client.CreateContainer(s); err != nil { + if exportsId, err = b.client.CreateContainer(s); err != nil { return s, err } - defer b.client.RemoveContainer(b.exportsID) + defer b.client.RemoveContainer(exportsId) - log.Infof("| Running in %.12s: %s", b.exportsID, strings.Join(cmd, " ")) + log.Infof("| Running in %.12s: %s", exportsId, strings.Join(cmd, " ")) - if err = b.client.RunContainer(b.exportsID, false); err != nil { + if err = b.client.RunContainer(exportsId, false); err != nil { return s, err } @@ -1111,10 +1110,16 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { if len(args) == 0 { return s, fmt.Errorf("IMPORT requires at least one argument") } - if b.exportsID == "" { + if len(b.exports) == 0 { return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") } + // TODO: EXPORT and IMPORT cache is not invalidated properly in between + // different tracks of the same build. The EXPORT may be cached + // because it was built earlier with the same prerequisites, but the actual + // data in the exports container may be from the latest EXPORT of different + // build. So we need to prefix ~/.rocker_exports dir with some id somehow. + log.Infof("| Import from %s", b.exportsContainerName()) // If only one argument was given to IMPORT, use the same path for destination @@ -1133,14 +1138,15 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { src = append(src, argResolved) } - s.Commit("IMPORT %.12s:%q %s", b.exportsID, src, dest) + sort.Strings(b.exports) + s.Commit("IMPORT %q : %q %s", b.exports, src, dest) // Check cache s, hit, err := b.probeCache(s) if err != nil { return s, err } - if hit && !b.exportsCacheBusted { + if hit { return s, nil } @@ -1177,6 +1183,9 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { return s, err } + // TODO: if b.exportsCacheBusted and IMPORT cache was invalidated, + // CommitCommand then caches it anyway. 
+ return s, nil } From d9f89a9ade0789f6c0f370be33c7bd95c27fd0a2 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 28 Sep 2015 16:09:39 +0300 Subject: [PATCH 075/131] FROM scratch --- README.md | 4 ++-- src/rocker/build2/build.go | 4 +++- src/rocker/build2/cache.go | 8 +++++++ src/rocker/build2/client.go | 7 +++++- src/rocker/build2/commands.go | 34 +++++++++++++++++++++--------- src/rocker/build2/commands_test.go | 2 ++ src/rocker/build2/state.go | 1 + 7 files changed, 46 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index fd77423c..c1a29419 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ In this branch we are developing the new experimental implementation of Rocker t ### v1 TODO -- [ ] Cache -- [ ] FROM scratch +- [x] Cache +- [x] FROM scratch - [ ] ADD urls and local archives --- diff --git a/src/rocker/build2/build.go b/src/rocker/build2/build.go index 7b9818da..73838ecf 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build2/build.go @@ -151,7 +151,9 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { return s, true, err } if img == nil { - log.Warnf("Cannot find the cached image %.12s, consider cleaning the cache", s2.ImageID) + defer b.cache.Del(*s2) + s.CacheBusted = true + log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached")) return s, false, nil } diff --git a/src/rocker/build2/cache.go b/src/rocker/build2/cache.go index 116b6014..e10efbc0 100644 --- a/src/rocker/build2/cache.go +++ b/src/rocker/build2/cache.go @@ -28,6 +28,7 @@ import ( type Cache interface { Get(s State) (s2 *State, err error) Put(s State) error + Del(s State) error } type CacheFS struct { @@ -89,3 +90,10 @@ func (c *CacheFS) Put(s State) error { } return ioutil.WriteFile(fileName, data, 0644) } + +func (c *CacheFS) Del(s State) error { + log.Debugf("CACHE DELETE %s %s %q", s.ParentID, s.ImageID, s.Commits) + + fileName := filepath.Join(c.root, s.ParentID, s.ImageID) + ".json" + return os.RemoveAll(fileName) +} diff --git a/src/rocker/build2/client.go b/src/rocker/build2/client.go index efd8d36d..9b8516a5 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build2/client.go @@ -137,7 +137,12 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { return "", err } - log.Infof("| Created container %.12s (image %.12s)", container.ID, s.ImageID) + imageStr := fmt.Sprintf("(image %.12s)", s.ImageID) + if s.ImageID == "" { + imageStr = "(from scratch)" + } + + log.Infof("| Created container %.12s %s", container.ID, imageStr) return container.ID, nil } diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go index 918c4b8e..455d534e 100644 --- a/src/rocker/build2/commands.go +++ b/src/rocker/build2/commands.go @@ -138,6 +138,11 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { name = c.cfg.args[0] ) + if name == "scratch" { + s.NoBaseImage = true + return s, nil + } + // If Pull is true, then img will remain nil and it will be pulled below if !b.cfg.Pull { if img, err = b.client.InspectImage(name); err != nil { @@ -268,19 +273,28 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { return s, nil } + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `from` prior to commit") + } + // TODO: ? 
// if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") - // Check cache - s, hit, err := b.probeCache(s) - if err != nil { - return s, err - } - if hit { - return s, nil - } + // TODO: verify that we need to check cache in commit only for + // a non-container actions if s.ContainerID == "" { + + // Check cache + var hit bool + s, hit, err = b.probeCache(s) + if err != nil { + return s, err + } + if hit { + return s, nil + } + origCmd := s.Config.Cmd s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} @@ -335,7 +349,7 @@ func (c *CommandRun) ShouldRun(b *Build) (bool, error) { func (c *CommandRun) Execute(b *Build) (s State, err error) { s = b.state - if s.ImageID == "" { + if s.ImageID == "" && !s.NoBaseImage { return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") } @@ -401,7 +415,7 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { return s, nil } - if s.ImageID == "" { + if s.ImageID == "" && !s.NoBaseImage { return s, fmt.Errorf("Please provide a source image with `FROM` prior to ATTACH") } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build2/commands_test.go index 6f8cee1f..352acfd7 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build2/commands_test.go @@ -161,6 +161,7 @@ func TestCommandCommit_Simple(t *testing.T) { cmd := &CommandCommit{} resultImage := &docker.Image{ID: "789"} + b.state.ImageID = "123" b.state.ContainerID = "456" b.state.Commit("a").Commit("b") @@ -185,6 +186,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { cmd := &CommandCommit{} resultImage := &docker.Image{ID: "789"} + b.state.ImageID = "123" b.state.Commit("a").Commit("b") c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { diff --git a/src/rocker/build2/state.go b/src/rocker/build2/state.go index 31d5bfed..1162a47a 100644 --- a/src/rocker/build2/state.go +++ b/src/rocker/build2/state.go @@ -32,6 +32,7 @@ type State struct { ContainerID string ExportsID string Commits []string + NoBaseImage bool ProducedImage bool CmdSet bool CacheBusted bool From 9cf725755e87c9b04d3981b1a385938766680958 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 28 Sep 2015 16:23:24 +0300 Subject: [PATCH 076/131] fix make --- Makefile | 20 ++++++++++---------- VERSION | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index a5061766..29dc14a1 100644 --- a/Makefile +++ b/Makefile @@ -36,10 +36,19 @@ UPLOAD_CMD = $(GITHUB_RELEASE) upload \ SRCS = $(shell find . 
-name '*.go' | grep -v '^./vendor/') PKGS := $(foreach pkg, $(sort $(dir $(SRCS))), $(pkg)) -GOPATH ?= $(shell pwd):$(shell pwd)/vendor +GOPATH = $(shell pwd):$(shell pwd)/vendor TESTARGS ?= +binary: + GOPATH=$(GOPATH) go build \ + -ldflags "-X main.Version=$(VERSION) -X main.GitCommit=$(GITCOMMIT) -X main.GitBranch=$(GITBRANCH) -X main.BuildTime=$(BUILDTIME)" \ + -v -o bin/rocker src/cmd/rocker/main.go + +install: + cp bin/rocker /usr/local/bin/rocker + chmod +x /usr/local/bin/rocker + all: $(ALL_BINARIES) $(foreach BIN, $(BINARIES), $(shell cp dist/$(VERSION)/$(shell go env GOOS)/amd64/$(BIN) dist/$(BIN))) @@ -75,18 +84,9 @@ build_image: docker_image: rocker build -var Version=$(VERSION) -install: - cp dist/$(VERSION)/$(shell go env GOOS)/amd64/rocker /usr/local/bin/rocker - chmod +x /usr/local/bin/rocker - clean: rm -Rf dist -local_binary: - go build \ - -ldflags "-X main.Version=$(VERSION) -X main.GitCommit=$(GITCOMMIT) -X main.GitBranch=$(GITBRANCH) -X main.BuildTime=$(BUILDTIME)" \ - -v -o bin/rocker src/cmd/rocker/main.go - testdeps: @ go get github.com/GeertJohan/fgt @ go get github.com/constabulary/gb/... diff --git a/VERSION b/VERSION index abd41058..3eefcb9d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.2.4 +1.0.0 From b35a584fcd4611e25e10ec5539b64e8bef477245 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 28 Sep 2015 16:46:53 +0300 Subject: [PATCH 077/131] fix COPY to substitute WORKDIR when dest is relative --- src/rocker/build2/copy.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index 302b812c..8388801e 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -72,6 +72,10 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } + if !filepath.IsAbs(dest) { + dest = filepath.Join(s.Config.WorkingDir, dest) + } + if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil { return s, err } From 62c50c0b8e483a02022d2a1fa9bf849aa965c6bf Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 28 Sep 2015 16:57:03 +0300 Subject: [PATCH 078/131] fix COPY relative dest --- src/rocker/build2/copy.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rocker/build2/copy.go b/src/rocker/build2/copy.go index 8388801e..89810d82 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build2/copy.go @@ -68,12 +68,16 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { ) // If destination is not a directory (no leading slash) - if !strings.HasSuffix(dest, string(os.PathSeparator)) && len(src) > 1 { + hasLeadingSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + if !hasLeadingSlash && len(src) > 1 { return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } if !filepath.IsAbs(dest) { dest = filepath.Join(s.Config.WorkingDir, dest) + if hasLeadingSlash { + dest += string(os.PathSeparator) + } } if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes); err != nil { From bfd139eae2dcb80875c2c98d5a426728f1c3d332 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 29 Sep 2015 07:34:02 +0300 Subject: [PATCH 079/131] COPY todo --- src/rocker/build2/copy_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/rocker/build2/copy_test.go 
b/src/rocker/build2/copy_test.go index e469aa84..0ba7a14c 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build2/copy_test.go @@ -665,6 +665,11 @@ func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +// TODO: +// WORKDIR /app +// COPY lib lib/ +// should copy to /app/lib + // helper functions func makeTmpDir(t *testing.T, files map[string]string) string { From ad815cd85cf786911512ba4e32d6c1d2614558d1 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 29 Sep 2015 07:35:34 +0300 Subject: [PATCH 080/131] readme: install v1 --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index c1a29419..0cb73ab1 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,13 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are m # *NOTE on v1 branch* In this branch we are developing the new experimental implementation of Rocker that will be completely client-side driven, with no fallback on `docker build`. This means faster builds and more power. No build context uploads anymore. Also, the builder code is completely rewritten and made much more testable and extensible in the future. Caching might be also rethought. Cross-server builds determinism is our dream. +Install v1 (you should have golang 1.5): + +```bash +make +make install +``` + ### v1 TODO - [x] Cache From 28293925a9ed612f399b830ab1b689f67e6b737c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 30 Sep 2015 09:48:00 +0300 Subject: [PATCH 081/131] build2 -> build --- src/cmd/rocker/main.go | 19 +- src/rocker/{build2 => build}/build.go | 2 +- src/rocker/{build2 => build}/build_test.go | 2 +- src/rocker/build/builder.go | 394 ----- src/rocker/build/builder_test.go | 1152 ------------- src/rocker/{build2 => build}/cache.go | 2 +- src/rocker/{build2 => build}/cache_test.go | 2 +- src/rocker/{build2 => build}/client.go | 2 +- src/rocker/{build2 => build}/client_tty.go | 2 +- src/rocker/build/commands.go | 1431 ++++++++++++----- src/rocker/{build2 => build}/commands_test.go | 2 +- src/rocker/{build2 => build}/compare.go | 2 +- src/rocker/build/config.go | 95 -- .../{build2 => build}/container_formatter.go | 2 +- src/rocker/build/containers.go | 292 ---- src/rocker/{build2 => build}/copy.go | 2 +- src/rocker/{build2 => build}/copy_test.go | 2 +- src/rocker/{build2 => build}/dockerignore.go | 2 +- .../{build2 => build}/dockerignore_test.go | 2 +- src/rocker/build/imagedata.go | 75 - src/rocker/build/internals.go | 477 ------ src/rocker/{build2 => build}/plan.go | 2 +- src/rocker/{build2 => build}/plan_test.go | 2 +- src/rocker/build/rockerfile.go | 138 +- src/rocker/build/rockerfile_test.go | 89 +- src/rocker/build/semver.go | 64 - src/rocker/{build2 => build}/state.go | 2 +- src/rocker/{build2 => build}/tar.go | 2 +- src/rocker/{build2 => build}/tar_unix.go | 2 +- src/rocker/{build2 => build}/tar_windows.go | 2 +- src/rocker/build/testdata/Rockerfile_result | 15 - .../build/testdata/Rockerfile_string_result | 15 - src/rocker/build/tty.go | 70 - src/rocker/{build2 => build}/util.go | 2 +- src/rocker/build2/commands.go | 1221 -------------- src/rocker/build2/rockerfile.go | 164 -- src/rocker/build2/rockerfile_test.go | 77 - src/rocker/build2/testdata/Rockerfile | 34 - 38 files changed, 1191 insertions(+), 4671 deletions(-) rename src/rocker/{build2 => build}/build.go (99%) rename src/rocker/{build2 => build}/build_test.go (99%) delete mode 100644 src/rocker/build/builder.go delete mode 100644 
src/rocker/build/builder_test.go rename src/rocker/{build2 => build}/cache.go (99%) rename src/rocker/{build2 => build}/cache_test.go (98%) rename src/rocker/{build2 => build}/client.go (99%) rename src/rocker/{build2 => build}/client_tty.go (99%) rename src/rocker/{build2 => build}/commands_test.go (99%) rename src/rocker/{build2 => build}/compare.go (99%) delete mode 100644 src/rocker/build/config.go rename src/rocker/{build2 => build}/container_formatter.go (98%) delete mode 100644 src/rocker/build/containers.go rename src/rocker/{build2 => build}/copy.go (99%) rename src/rocker/{build2 => build}/copy_test.go (99%) rename src/rocker/{build2 => build}/dockerignore.go (99%) rename src/rocker/{build2 => build}/dockerignore_test.go (98%) delete mode 100644 src/rocker/build/imagedata.go delete mode 100644 src/rocker/build/internals.go rename src/rocker/{build2 => build}/plan.go (99%) rename src/rocker/{build2 => build}/plan_test.go (99%) delete mode 100644 src/rocker/build/semver.go rename src/rocker/{build2 => build}/state.go (98%) rename src/rocker/{build2 => build}/tar.go (99%) rename src/rocker/{build2 => build}/tar_unix.go (98%) rename src/rocker/{build2 => build}/tar_windows.go (99%) delete mode 100644 src/rocker/build/testdata/Rockerfile_result delete mode 100644 src/rocker/build/testdata/Rockerfile_string_result delete mode 100644 src/rocker/build/tty.go rename src/rocker/{build2 => build}/util.go (99%) delete mode 100644 src/rocker/build2/commands.go delete mode 100644 src/rocker/build2/rockerfile.go delete mode 100644 src/rocker/build2/rockerfile_test.go delete mode 100644 src/rocker/build2/testdata/Rockerfile diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 5c7f4665..1354102a 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -24,7 +24,6 @@ import ( "strings" "rocker/build" - "rocker/build2" "rocker/dockerclient" "rocker/imagename" "rocker/template" @@ -177,7 +176,7 @@ func main() { func buildCommand(c *cli.Context) { var ( - rockerfile *build2.Rockerfile + rockerfile *build.Rockerfile err error ) @@ -216,7 +215,7 @@ func buildCommand(c *cli.Context) { if configFilename == "-" { - rockerfile, err = build2.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{}) + rockerfile, err = build.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{}) if err != nil { log.Fatal(err) } @@ -227,7 +226,7 @@ func buildCommand(c *cli.Context) { configFilename = filepath.Join(wd, configFilename) } - rockerfile, err = build2.NewRockerfileFromFile(configFilename, vars, template.Funs{}) + rockerfile, err = build.NewRockerfileFromFile(configFilename, vars, template.Funs{}) if err != nil { log.Fatal(err) } @@ -252,7 +251,7 @@ func buildCommand(c *cli.Context) { dockerignoreFilename := filepath.Join(contextDir, ".dockerignore") if _, err := os.Stat(dockerignoreFilename); err == nil { - if dockerignore, err = build2.ReadDockerignoreFile(dockerignoreFilename); err != nil { + if dockerignore, err = build.ReadDockerignoreFile(dockerignoreFilename); err != nil { log.Fatal(err) } } @@ -270,15 +269,15 @@ func buildCommand(c *cli.Context) { auth.Password = userPass[1] } - client := build2.NewDockerClient(dockerClient, auth) + client := build.NewDockerClient(dockerClient, auth) - var cache build2.Cache + var cache build.Cache if !c.Bool("no-cache") { // TODO: configurable cache dir - cache = build2.NewCacheFS(os.Getenv("HOME") + "/.rocker_cache") + cache = build.NewCacheFS(os.Getenv("HOME") + "/.rocker_cache") } - builder := build2.New(client, rockerfile, 
cache, build2.Config{ + builder := build.New(client, rockerfile, cache, build.Config{ InStream: os.Stdin, OutStream: os.Stdout, ContextDir: contextDir, @@ -292,7 +291,7 @@ func buildCommand(c *cli.Context) { Push: c.Bool("push"), }) - plan, err := build2.NewPlan(rockerfile.Commands(), true) + plan, err := build.NewPlan(rockerfile.Commands(), true) if err != nil { log.Fatal(err) } diff --git a/src/rocker/build2/build.go b/src/rocker/build/build.go similarity index 99% rename from src/rocker/build2/build.go rename to src/rocker/build/build.go index 73838ecf..7827f0b3 100644 --- a/src/rocker/build2/build.go +++ b/src/rocker/build/build.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "fmt" diff --git a/src/rocker/build2/build_test.go b/src/rocker/build/build_test.go similarity index 99% rename from src/rocker/build2/build_test.go rename to src/rocker/build/build_test.go index aab0d4c3..ba963a52 100644 --- a/src/rocker/build2/build_test.go +++ b/src/rocker/build/build_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "io" diff --git a/src/rocker/build/builder.go b/src/rocker/build/builder.go deleted file mode 100644 index 1c20633e..00000000 --- a/src/rocker/build/builder.go +++ /dev/null @@ -1,394 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package build does build of given Rockerfile -package build - -import ( - "crypto/md5" - "fmt" - "io" - "os" - "path" - "path/filepath" - "strings" - - "rocker/imagename" - "rocker/parser" - "rocker/template" - - "github.com/docker/docker/pkg/term" - "github.com/fsouza/go-dockerclient" -) - -const ( - // busybox used for cache data volume containers - busyboxImage = "busybox:buildroot-2013.08.1" - rsyncImage = "grammarly/rsync-static:1" - - exportsVolume = "/.rocker_exports" -) - -var ( - // PassEnvVars is the list of ENV variables to pass to a Rockerfile - PassEnvVars = []string{"GIT_SSH_KEY"} -) - -// Builder is the main builder object. It holds configuration options and -// intermedate state while looping through a build commands. 
-type Builder struct { - Rockerfile string - RockerfileContent string - ContextDir string - ID string - OutStream io.Writer - InStream io.ReadCloser - Docker *docker.Client - Config *docker.Config - Auth *docker.AuthConfiguration - UtilizeCache bool - Push bool - NoReuse bool - Verbose bool - Attach bool - Vars template.Vars - CliVars template.Vars - AddMeta bool - Print bool - Pull bool - ArtifactsPath string - - rootNode *parser.Node - i int - imageID string - mounts []builderMount - allMounts []builderMount - dockerfile *parser.Node - cacheBusted bool - exportDirs []string - intermediateImages []string - exportsContainerID string - lastExportImageID string - gitIgnored bool - isTerminalIn bool - isTerminalOut bool - fdIn uintptr - fdOut uintptr - metaAdded bool - recentTags []*imagename.ImageName - imagesCache []docker.APIImages -} - -type builderMount struct { - cache bool - origSrc string - src string - dest string - containerID string -} - -func (mount builderMount) String() string { - if mount.src != "" { - return mount.src + ":" + mount.dest - } - return mount.dest + ":" + mount.containerID -} - -// Build runs the build of given Rockerfile and returns image id -func (builder *Builder) Build() (imageID string, err error) { - // Do initial cleanup, you know, just to be sure - // Previous builds could be ended up abnormally - if err := builder.cleanup(); err != nil { - return "", err - } - - // Initialize auth configuration - if builder.Auth == nil { - builder.Auth = &docker.AuthConfiguration{} - } - - // Initialize in/out file descriptors - if builder.InStream != nil { - fd, isTerminal := term.GetFdInfo(builder.InStream) - builder.fdIn = fd - builder.isTerminalIn = isTerminal - } - if builder.OutStream != nil { - fd, isTerminal := term.GetFdInfo(builder.OutStream) - builder.fdOut = fd - builder.isTerminalOut = isTerminal - } - - // Wrap this into function to have deferred functions run before - // we do final checks - run := func() (err error) { - fd, err := os.Open(builder.Rockerfile) - if err != nil { - return fmt.Errorf("Failed to open file %s, error: %s", builder.Rockerfile, err) - } - defer fd.Close() - - data, err := template.Process(builder.Rockerfile, fd, builder.Vars.ToMapOfInterface(), map[string]interface{}{}) - if err != nil { - return err - } - builder.RockerfileContent = data.String() - - if builder.Print { - fmt.Print(builder.RockerfileContent) - os.Exit(0) - } - - if builder.ContextDir == "" { - builder.ContextDir = filepath.Dir(builder.Rockerfile) - } - - if _, err := os.Stat(builder.ContextDir); err != nil { - return err - } - - if err := builder.checkDockerignore(); err != nil { - return err - } - - rootNode, err := parser.Parse(strings.NewReader(builder.RockerfileContent)) - if err != nil { - return err - } - - builder.rootNode = rootNode - builder.dockerfile = &parser.Node{} - - defer func() { - if err2 := builder.cleanup(); err2 != nil && err == nil { - err = err2 - } - }() - - for builder.i = 0; builder.i < len(builder.rootNode.Children); builder.i++ { - oldImageID := builder.imageID - - if err := builder.dispatch(builder.i, builder.rootNode.Children[builder.i]); err != nil { - return err - } - - if builder.imageID != oldImageID && builder.imageID != "" { - fmt.Fprintf(builder.OutStream, "[Rocker] ---> %.12s\n", builder.imageID) - } - } - - if err := builder.runDockerfile(); err != nil { - return err - } - - return nil - } - - if err := run(); err != nil { - return "", err - } - - if builder.imageID == "" { - return "", fmt.Errorf("No image was generated. 
Is your Rockerfile empty?") - } - - fmt.Fprintf(builder.OutStream, "[Rocker] Successfully built %.12s\n", builder.imageID) - - return builder.imageID, nil -} - -// dispatch runs a particular command -func (builder *Builder) dispatch(stepN int, node *parser.Node) (err error) { - cmd := node.Value - attrs := node.Attributes - original := node.Original - args := []string{} - flags := parseFlags(node.Flags) - - // fill in args and substitute vars - for n := node.Next; n != nil; n = n.Next { - // TODO: we also may want to collect ENV variables to use in EXPORT for example - n.Value = builder.Vars.ReplaceString(n.Value) - args = append(args, n.Value) - } - - switch cmd { - - case "mount", "run", "export", "import", "tag", "push", "require", "var", "include", "attach", "from": - // we do not have to eval RUN ourselves if we have no mounts - if cmd == "run" && len(builder.mounts) == 0 { - break - } - // also skip initial FROM command - if cmd == "from" && builder.imageID == "" { - break - } - // run dockerfile we have collected so far - // except if we have met INCLUDE - if cmd != "include" { - if err := builder.runDockerfile(); err != nil { - return err - } - } - - // do not want to report processing FROM command (unnecessary) - if cmd != "from" { - fmt.Fprintf(builder.OutStream, "[Rocker] %s %s\n", strings.ToUpper(cmd), strings.Join(args, " ")) - } - - switch cmd { - case "mount": - return builder.cmdMount(args, attrs, flags, original) - case "export": - return builder.cmdExport(args, attrs, flags, original) - case "import": - return builder.cmdImport(args, attrs, flags, original) - case "run": - return builder.cmdRun(args, attrs, flags, original) - case "tag": - return builder.cmdTag(args, attrs, flags, original) - case "push": - return builder.cmdPush(args, attrs, flags, original) - case "require": - return builder.cmdRequire(args, attrs, flags, original) - case "var": - return builder.cmdVar(args, attrs, flags, original) - case "include": - return builder.cmdInclude(args, attrs, flags, original) - case "attach": - return builder.cmdAttach(args, attrs, flags, original) - case "from": - // We don't need previous image - // TODO: check it will be not deleted if tagged - builder.intermediateImages = append(builder.intermediateImages, builder.imageID) - builder.reset() - } - - // use it for warnings if .git is not ignored - case "add", "copy": - addAll := false - if len(args) > 0 { - for _, arg := range args[:len(args)-1] { - allArg := arg == "/" || arg == "." || arg == "./" || arg == "*" || arg == "./*" - addAll = addAll || allArg - } - } - hasGitInRoot := false - if _, err := os.Stat(builder.ContextDir + "/.git"); err == nil { - hasGitInRoot = true - } - if hasGitInRoot && !builder.gitIgnored && addAll { - fmt.Fprintf(builder.OutStream, - "[Rocker] *** WARNING .git is not ignored in .dockerignore; not ignoring .git will beat caching of: %s\n", original) - } - } - - // TODO: cancel build? 
- - // collect dockerfile - builder.pushToDockerfile(node) - - return nil -} - -// reset does reset the builder state; it is used in between different FROMs -// it doest not reset completely, some properties are shared across FROMs -func (builder *Builder) reset() { - builder.mounts = []builderMount{} - builder.imageID = "" - builder.dockerfile = &parser.Node{} - builder.Config = &docker.Config{} - builder.cacheBusted = false - builder.metaAdded = false - return -} - -// pushToDockerfile collects commands that will falled back to a `docker build` -func (builder *Builder) pushToDockerfile(node *parser.Node) { - builder.dockerfile.Children = append(builder.dockerfile.Children, node) -} - -// addMount adds a mount structure to the state -func (builder *Builder) addMount(mount builderMount) { - builder.mounts = append(builder.mounts, mount) - builder.allMounts = append(builder.allMounts, mount) -} - -// removeLastMount pops mount structure from the state -func (builder *Builder) removeLastMount() { - if len(builder.mounts) == 0 { - return - } - builder.mounts = builder.mounts[0 : len(builder.mounts)-1] -} - -// rockerfileName returns basename of current Rockerfile -func (builder *Builder) rockerfileName() string { - return filepath.Base(builder.Rockerfile) -} - -// rockerfileRelativePath returns the path of the current Rockerfile relative to the context dir -// TODO: whyrockerfileRelativePath() returns the basename instead? Need to test it -func (builder *Builder) rockerfileRelativePath() string { - return filepath.Base(builder.Rockerfile) -} - -// dockerfileName generates the name of Dockerfile that will be written to a context dir -// and then thrown to a `docker build` fallback -func (builder *Builder) dockerfileName() string { - // Here we cannot puth temporary Dockerfile into tmp directory - // That's how docker ignore technique works - it does not remove the direcotry itself, sadly - dockerfileName := builder.getTmpPrefix() + "_" + builder.rockerfileName() - if builder.imageID == "" { - return dockerfileName + "_init" - } - return dockerfileName + "_" + fmt.Sprintf("%.12s", builder.imageID) -} - -// getTmpPrefix returns the prefix for all of rocker's tmp files that will be written -// to the currect directory -func (builder *Builder) getTmpPrefix() string { - return ".rockertmp" -} - -// getIdentifier returns the sequence that is unique to the current Rockerfile -func (builder *Builder) getIdentifier() string { - if builder.ID != "" { - return builder.ID - } - return builder.ContextDir + ":" + builder.Rockerfile -} - -// mountsContainerName returns the name of volume container that will be used for a particular MOUNT -func (builder *Builder) mountsContainerName(destinations []string) string { - // TODO: should mounts be reused between different FROMs ? - mountID := builder.getIdentifier() + ":" + strings.Join(destinations, ":") - return fmt.Sprintf("rocker_mount_%.6x", md5.Sum([]byte(mountID))) -} - -// exportsContainerName return the name of volume container that will be used for EXPORTs -func (builder *Builder) exportsContainerName() string { - mountID := builder.getIdentifier() - return fmt.Sprintf("rocker_exports_%.6x", md5.Sum([]byte(mountID))) -} - -// cleanup cleans all tmp files produced by the build -func (builder *Builder) cleanup() error { - // All we have to do is remove tmpDir - // This will disable us to do parallel builds, but much easier to implement! 
- os.RemoveAll(path.Join(builder.ContextDir, builder.getTmpPrefix())) - return nil -} diff --git a/src/rocker/build/builder_test.go b/src/rocker/build/builder_test.go deleted file mode 100644 index c5220e2e..00000000 --- a/src/rocker/build/builder_test.go +++ /dev/null @@ -1,1152 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -// This is a suite of integration tests for rocker/build -// I have no idea of how to isolate it and run without Docker - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "rocker/dockerclient" - "rocker/template" - "rocker/test" - "rocker/util" - - "github.com/stretchr/testify/assert" - - "github.com/fsouza/go-dockerclient" -) - -func TestBuilderBuildBasic(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuild_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -ENTRYPOINT ls / -RUN touch /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Contains(t, result, "testing", "expected result (ls) to contain testing file") -} - -func TestBuilderBuildTag(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -TAG testing -RUN touch /testing -PUSH quay.io/testing_project`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"ls", "/"}, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "true", "true", "failed") -} - -func TestBuilderBuildSemverTag(t *testing.T) { - - tempDir, err := 
ioutil.TempDir("/tmp", "rocker_TestBuilderBuildSemverTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM scratch -TAG --semver testing:1.2.3-build123`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - // Vars: VarsFromStrings([]string{"branch=master", "commit=314ad"}), - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"ls", "/"}, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "true", "true", "failed") -} - -func TestBuilderBuildTagLabels(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTagLabels_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - rockerfileContent := `FROM busybox:buildroot-2013.08.1 -TAG testing -RUN touch /testing -LABEL foo=bar -PUSH quay.io/testing_project` - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": rockerfileContent, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - vars, err := template.VarsFromStrings([]string{"asd=qwe"}) - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - CliVars: vars, - Docker: client, - AddMeta: true, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - inspect, err := client.InspectImage(imageID) - if err != nil { - t.Fatal(err) - } - - // test inherited labels - assert.Equal(t, "bar", inspect.Config.Labels["foo"]) - - // test rockerfile content - data := &RockerImageData{} - if err := json.Unmarshal([]byte(inspect.Config.Labels["rocker-data"]), data); err != nil { - t.Fatal(err) - } - assert.Equal(t, rockerfileContent, data.Rockerfile) - - // test vars - assert.Equal(t, vars, data.Vars) -} - -func TestBuilderBuildMounts(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildTag_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT /app/node_modules /app/bower_components -RUN ls /app > /out -CMD cat /out`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Cleanup mount 
containers - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %s", result) - - assert.Equal(t, "bower_components\nnode_modules\n", result, "expected both volumes to be mounted") - assert.Equal(t, 1, len(builder.getMountContainerIds()), "expected only one volume container to be created") -} - -func TestBuilderMountFromHost(t *testing.T) { - - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - // Use current working directroy as a temp dir to make MOUNT work in boot2docker - tempDir, err := ioutil.TempDir(wd, "rocker_TestBuilderMountFromHost_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT .:/src -RUN echo "hello" > /src/test`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - content, err := ioutil.ReadFile(tempDir + "/test") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "hello\n", string(content)) -} - -func TestBuilderBuildVars(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildVars_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -RUN echo "version:$version" > /version`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - vars, err := template.VarsFromStrings([]string{"version=125"}) - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Vars: vars, - // Push: true, - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - Cmd: []string{"cat", "/version"}, - }, nil) - - assert.Equal(t, "version:125\n", result, "failed") -} - -func TestBuilderBuildMultiple(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildMultiple_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/index.js": "console.log('hello')", - "/data/README": "hello", - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -ADD . 
/app -MOUNT /app/node_modules -RUN echo "hehe" > /app/node_modules/some_module && \ - cd /app/node_modules && \ - ln -sf some_module link_to_some_module -EXPORT /app -FROM busybox:buildroot-2013.08.1 -IMPORT /app - `, - }) - if err != nil { - t.Fatal(err) - } - - imageIDs := make(map[string]struct{}) - mounts := make(map[string]struct{}) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func() (imageID string, err error) { - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - UtilizeCache: true, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if mountContainerID != "" { - mounts[mountContainerID] = struct{}{} - } - } - }() - - imageID, err = builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - imageIDs[imageID] = struct{}{} - - for _, imageID := range builder.intermediateImages { - imageIDs[imageID] = struct{}{} - } - - return imageID, nil - } - - defer func() { - for imageID := range imageIDs { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - } - }() - - // Cleanup mount containers - defer func() { - for mountContainerID := range mounts { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - imageID1, err := run() - if err != nil { - t.Fatal(err) - } - - fmt.Println("============================================================") - - imageID2, err := run() - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, imageID1, imageID2, "expected images to be equal (valid caching behavior)") -} - -func TestBuilderBuildContainerVolume(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildContainerVolume_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -MOUNT /cache -RUN echo "hello" >> /cache/output.log -RUN cp /cache/output.log /result_cache.log -CMD cat /result_cache.log`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - // Step 1 - - runUtilizeCache := func(utilizeCache bool) (result string, err error) { - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - UtilizeCache: utilizeCache, - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - // Cleanup mount containers - defer func() { - for _, mountContainerID := range builder.getAllMountContainerIds() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ - ID: mountContainerID, - RemoveVolumes: true, - Force: true, - }); err != nil { - t.Log(err) - } - } - }() - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Step 2 - - builder2 := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - UtilizeCache: utilizeCache, - Docker: client, - } - - imageID2, err := builder2.Build() - if err != nil { - return "", err - } - t.Logf("Got 
imageID2: %s", imageID2) - - defer func() { - if err := client.RemoveImageExtended(imageID2, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID2, - }, nil) - } - - result1, err := runUtilizeCache(true) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, "hello\n", result1, "failed") - - result2, err := runUtilizeCache(false) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, "hello\nhello\n", result2, "failed") -} - -func TestBuilderBuildAddCache(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildAddCache_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/data/README": "hello", - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -ADD . /src -RUN ls -la /src -`, - }) - if err != nil { - t.Fatal(err) - } - - var imageIDs []string - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func() (imageID string, err error) { - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - UtilizeCache: true, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err = builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - imageIDs = append(imageIDs, imageID) - - return imageID, nil - } - - defer func() { - for _, imageID := range imageIDs { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - } - }() - - imageID1, err := run() - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - imageID2, err := run() - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, imageID1, imageID2, "expected images to be equal (valid caching behavior)") -} - -func TestBuilderBuildRequire(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildRequire_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -REQUIRE version -RUN echo "$version" > /testing -CMD cat /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func(vars []string) (string, error) { - tlpVars, err := template.VarsFromStrings(vars) - if err != nil { - return "", err - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Vars: tlpVars, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - } - - _, err1 := run([]string{}) - result, err2 := run([]string{"version=123"}) - - assert.Equal(t, "Var $version is required but not set", err1.Error()) - assert.Nil(t, err2, "expected second run to not give error") - assert.Equal(t, "123\n", result) -} - -func TestBuilderBuildVar(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildVar_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ 
- "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -VAR test=true -RUN touch /testing -RUN if [ "$test" == "true" ] ; then echo "done test" > /testing; fi -CMD cat /testing`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - run := func(vars []string) (string, error) { - tplVars, err := template.VarsFromStrings(vars) - if err != nil { - return "", err - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Vars: tplVars, - } - - imageID, err := builder.Build() - if err != nil { - return "", err - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - return runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - } - - result1, err := run([]string{}) - if err != nil { - t.Fatal(err) - } - - result2, err := run([]string{"test=false"}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "done test\n", result1) - assert.Equal(t, "", result2) -} - -func TestBuilderBuildAttach(t *testing.T) { - t.Skip() - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildAttach_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -CMD ["/bin/sh"] -ATTACH --name=test-attach ["ls", "-la"]`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - InStream: os.Stdin, - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Attach: true, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() -} - -func TestBuilderEnsureImage(t *testing.T) { - t.Skip() - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Auth: &docker.AuthConfiguration{}, - } - - image := "busybox:buildroot-2013.08.1" - - if err := builder.ensureImage(image, "testing"); err != nil { - t.Fatal(err) - } - - assert.Equal(t, "", "") -} - -func TestBuilderEnsureContainer(t *testing.T) { - t.Skip() - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - Auth: &docker.AuthConfiguration{}, - } - - containerConfig := &docker.Config{ - Image: "grammarly/rsync-static:1", - } - containerName := "rocker_TestBuilderEnsureContainer" - - defer func() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ID: containerName, Force: true}); err != nil { - t.Fatal(err) - } - }() - - if _, err := builder.ensureContainer(containerName, containerConfig, "testing"); err != nil { - t.Fatal(err) - } - - assert.Equal(t, "", "") -} - -func TestBuilderBuildGitWarning(t *testing.T) { - t.Skip() - - tempDir, err := ioutil.TempDir("/tmp", 
"rocker_TestBuilderBuildGitWarning_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/.git/HEAD": "hello", - "/testing": "hello2", - "/Rockerfile": `FROM busybox:buildroot-2013.08.1 -ADD . /`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %q", result) - - assert.Contains(t, result, "testing", "expected result (ls) to contain testing file") -} - -func TestBuilderBuildInclude(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderBuildInclude_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/nodejs": ` -RUN touch /test/bin/nodejs -RUN touch /test/bin/npm -`, - "/java": ` -RUN touch /test/bin/java -RUN touch /test/bin/gradle -`, - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -RUN mkdir -p /test/bin -INCLUDE nodejs -INCLUDE java -CMD ["ls", "/test/bin"] -`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: imageID, - }, nil) - - t.Logf("Got result: %q", result) - - assert.Equal(t, "gradle\njava\nnodejs\nnpm\n", result, "expected result (ls) to contain included files") -} - -func TestBuilderImportFromScratch(t *testing.T) { - - tempDir, err := ioutil.TempDir("/tmp", "rocker_TestBuilderImportFromScratch_") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - err = test.MakeFiles(tempDir, map[string]string{ - "/Rockerfile": ` -FROM busybox:buildroot-2013.08.1 -RUN mkdir -p /zzz && echo "hi" > /zzz/lalala -EXPORT zzz / - -FROM scratch -IMPORT zzz / -CMD ["true"] -`, - }) - - // we will need docker client to cleanup and do some cross-checks - client, err := dockerclient.New() - if err != nil { - t.Fatal(err) - } - - builder := &Builder{ - Rockerfile: tempDir + "/Rockerfile", - OutStream: util.PrefixPipe("[TEST] ", os.Stdout), - Docker: client, - } - - imageID, err := builder.Build() - if err != nil { - t.Fatal(err) - } - t.Logf("Got imageID: %s", imageID) - - defer func() { - if err := client.RemoveImageExtended(imageID, docker.RemoveImageOptions{Force: true}); err != nil { - t.Log(err) - } - }() - - // Create data volume container with scratch image - c, err := client.CreateContainer(docker.CreateContainerOptions{ - Config: &docker.Config{ - Image: imageID, - Volumes: map[string]struct{}{ - "/zzz": struct{}{}, - }, - }, - }) - if err != nil { - t.Fatal(err) - } 
- defer func() { - if err := client.RemoveContainer(docker.RemoveContainerOptions{ID: c.ID, RemoveVolumes: true, Force: true}); err != nil { - t.Log(err) - } - }() - - result, err := runContainer(t, client, &docker.Config{ - Image: "busybox:buildroot-2013.08.1", - Cmd: []string{"/bin/sh", "-c", "cat /zzz/lalala"}, - }, &docker.HostConfig{ - VolumesFrom: []string{c.ID}, - }) - - t.Logf("Got result: %q", result) - - assert.Equal(t, "hi\n", result) -} - -func runContainer(t *testing.T, client *docker.Client, config *docker.Config, hostConfig *docker.HostConfig) (result string, err error) { - if config == nil { - config = &docker.Config{} - } - if hostConfig == nil { - hostConfig = &docker.HostConfig{} - } - - opts := docker.CreateContainerOptions{ - Config: config, - HostConfig: hostConfig, - } - - container, err := client.CreateContainer(opts) - if err != nil { - return "", err - } - - // remove container after testing - defer func() { - if err2 := client.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID, Force: true}); err2 != nil && err == nil { - err = err2 - } - }() - - success := make(chan struct{}) - var buf bytes.Buffer - - attachOpts := docker.AttachToContainerOptions{ - Container: container.ID, - OutputStream: &buf, - ErrorStream: &buf, - Stream: true, - Stdout: true, - Stderr: true, - Success: success, - } - go client.AttachToContainer(attachOpts) - - success <- <-success - - err = client.StartContainer(container.ID, &docker.HostConfig{}) - if err != nil { - return "", err - } - - statusCode, err := client.WaitContainer(container.ID) - if err != nil { - return "", err - } - - if statusCode != 0 { - return "", fmt.Errorf("Failed to run container, exit with code %d", statusCode) - } - - return buf.String(), nil -} diff --git a/src/rocker/build2/cache.go b/src/rocker/build/cache.go similarity index 99% rename from src/rocker/build2/cache.go rename to src/rocker/build/cache.go index e10efbc0..e20fe47d 100644 --- a/src/rocker/build2/cache.go +++ b/src/rocker/build/cache.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "encoding/json" diff --git a/src/rocker/build2/cache_test.go b/src/rocker/build/cache_test.go similarity index 98% rename from src/rocker/build2/cache_test.go rename to src/rocker/build/cache_test.go index 0844cdd0..9e534745 100644 --- a/src/rocker/build2/cache_test.go +++ b/src/rocker/build/cache_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "io/ioutil" diff --git a/src/rocker/build2/client.go b/src/rocker/build/client.go similarity index 99% rename from src/rocker/build2/client.go rename to src/rocker/build/client.go index 9b8516a5..15219210 100644 --- a/src/rocker/build2/client.go +++ b/src/rocker/build/client.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "fmt" diff --git a/src/rocker/build2/client_tty.go b/src/rocker/build/client_tty.go similarity index 99% rename from src/rocker/build2/client_tty.go rename to src/rocker/build/client_tty.go index 124065e2..78b15810 100644 --- a/src/rocker/build2/client_tty.go +++ b/src/rocker/build/client_tty.go @@ -2,7 +2,7 @@ // Licensed under the Apache License, Version 2.0; Copyright 2013-2015 Docker, Inc. 
See LICENSE.APACHE // NOTICE: no changes has been made to these functions code -package build2 +package build import ( "io" diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 46dbcf32..46442c54 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -17,552 +17,1205 @@ package build import ( - "encoding/json" "fmt" - "io/ioutil" "os" - "os/user" "path" "path/filepath" + "regexp" + "rocker/util" "sort" "strings" - "rocker/dockerclient" - "rocker/imagename" - "rocker/parser" - "rocker/template" - "rocker/util" - + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/nat" + "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" ) -// cmdRun implements RUN command -// If there were no MOUNTs before, rocker falls back to `docker build` to run it -func (builder *Builder) cmdRun(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - cmd := handleJSONArgs(args, attributes) +const ( + COMMIT_SKIP = "COMMIT_SKIP" +) - if !attributes["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) +type ConfigCommand struct { + name string + args []string + attrs map[string]bool + flags map[string]string + original string + isOnbuild bool +} + +type Command interface { + // Execute does the command execution and returns modified state. + // Note that here we use State not by reference because we want + // it to be immutable. In future, it may encoded/decoded from json + // and passed to the external command implementations. + Execute(b *Build) (State, error) + + // Returns true if the command should be executed + ShouldRun(b *Build) (bool, error) + + // String returns the human readable string representation of the command + String() string +} + +func NewCommand(cfg ConfigCommand) (cmd Command, err error) { + // TODO: use reflection? 
+ switch cfg.name { + case "from": + cmd = &CommandFrom{cfg} + case "maintainer": + cmd = &CommandMaintainer{cfg} + case "run": + cmd = &CommandRun{cfg} + case "attach": + cmd = &CommandAttach{cfg} + case "env": + cmd = &CommandEnv{cfg} + case "label": + cmd = &CommandLabel{cfg} + case "workdir": + cmd = &CommandWorkdir{cfg} + case "tag": + cmd = &CommandTag{cfg} + case "push": + cmd = &CommandPush{cfg} + case "copy": + cmd = &CommandCopy{cfg} + case "add": + cmd = &CommandAdd{cfg} + case "cmd": + cmd = &CommandCmd{cfg} + case "entrypoint": + cmd = &CommandEntrypoint{cfg} + case "expose": + cmd = &CommandExpose{cfg} + case "volume": + cmd = &CommandVolume{cfg} + case "user": + cmd = &CommandUser{cfg} + case "onbuild": + cmd = &CommandOnbuild{cfg} + case "mount": + cmd = &CommandMount{cfg} + case "export": + cmd = &CommandExport{cfg} + case "import": + cmd = &CommandImport{cfg} + default: + return nil, fmt.Errorf("Unknown command: %s", cfg.name) } - return builder.runAndCommit(cmd, "run") + if cfg.isOnbuild { + cmd = &CommandOnbuildWrap{cmd} + } + + return cmd, nil } -// cmdMount implements MOUNT command -// TODO: document behavior of cmdMount -func (builder *Builder) cmdMount(args []string, attributes map[string]bool, flags map[string]string, original string) error { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// CommandFrom implements FROM +type CommandFrom struct { + cfg ConfigCommand +} + +func (c *CommandFrom) String() string { + return c.cfg.original +} + +func (c *CommandFrom) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandFrom) Execute(b *Build) (s State, err error) { + // TODO: for "scratch" image we may use /images/create + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("FROM requires one argument") } - // TODO: read flags - useCache := false + var ( + img *docker.Image + name = c.cfg.args[0] + ) - newMounts := []*builderMount{} - newVolumeMounts := []*builderMount{} + if name == "scratch" { + s.NoBaseImage = true + return s, nil + } - for _, arg := range args { - var mount builderMount - if strings.Contains(arg, ":") { - pair := strings.SplitN(arg, ":", 2) - mount = builderMount{cache: useCache, src: pair[0], dest: pair[1]} - } else { - mount = builderMount{cache: useCache, dest: arg} + // If Pull is true, then img will remain nil and it will be pulled below + if !b.cfg.Pull { + if img, err = b.client.InspectImage(name); err != nil { + return s, err } + } - if mount.src == "" { - newVolumeMounts = append(newVolumeMounts, &mount) - } else { - // Process relative paths in volumes - if strings.HasPrefix(mount.src, "~") { - mount.src = strings.Replace(mount.src, "~", os.Getenv("HOME"), 1) - } - if !path.IsAbs(mount.src) { - mount.src = path.Join(builder.ContextDir, mount.src) - } - mount.origSrc = mount.src + if img == nil { + if err = b.client.PullImage(name); err != nil { + return s, err + } + if img, err = b.client.InspectImage(name); err != nil { + return s, err + } + if img == nil { + return s, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) + } + } - var err error + // We want to say the size of the FROM image. 
Better to do it + // from the client, but don't know how to do it better, + // without duplicating InspectImage calls and making unnecessary functions - if mount.src, err = dockerclient.ResolveHostPath(mount.src, builder.Docker); err != nil { - return err - } - } + log.WithFields(log.Fields{ + "size": units.HumanSize(float64(img.VirtualSize)), + }).Infof("| Image %.12s", img.ID) - newMounts = append(newMounts, &mount) + s = b.state + s.ImageID = img.ID + s.Config = *img.Config + + b.ProducedSize = 0 + b.VirtualSize = img.VirtualSize + + // If we don't have OnBuild triggers, then we are done + if len(s.Config.OnBuild) == 0 { + return s, nil } - // For volume mounts we need to create (or use existing) volume container - if len(newVolumeMounts) > 0 { - // Collect destinations and sort them alphabetically - // so changing the order on MOUNT commend does not have any effect - dests := make([]string, len(newVolumeMounts)) - containerVolumes := make(map[string]struct{}) + log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild)) - for i, mount := range newVolumeMounts { - dests[i] = mount.dest - containerVolumes[mount.dest] = struct{}{} - } - sort.Strings(dests) - - volumeContainerName := builder.mountsContainerName(dests) - - containerConfig := &docker.Config{ - Image: busyboxImage, - Volumes: containerVolumes, - Labels: map[string]string{ - "Volumes": strings.Join(dests, ":"), - "Rockerfile": builder.Rockerfile, - "ImageId": builder.imageID, - }, + // Remove them from the config, since the config will be committed. + s.InjectCommands = s.Config.OnBuild + s.Config.OnBuild = []string{} + + return s, nil +} + +// CommandMaintainer implements CMD +type CommandMaintainer struct { + cfg ConfigCommand +} + +func (c *CommandMaintainer) String() string { + return c.cfg.original +} + +func (c *CommandMaintainer) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandMaintainer) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") + } + + // Don't see any sense of doing a commit here, as Docker does + + return b.state, nil +} + +// CommandReset cleans the builder state before the next FROM +type CommandCleanup struct { + final bool + tagged bool +} + +func (c *CommandCleanup) String() string { + return "Cleaning up" +} + +func (c *CommandCleanup) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandCleanup) Execute(b *Build) (State, error) { + s := b.state + + if b.cfg.NoGarbage && !c.tagged && s.ImageID != "" && s.ProducedImage { + if err := b.client.RemoveImage(s.ImageID); err != nil { + return s, err } + } + + // Cleanup state + dirtyState := s + s = NewState(b) + + // Keep some stuff between froms + s.ExportsID = dirtyState.ExportsID + + // For final cleanup we want to keep imageID + if c.final { + s.ImageID = dirtyState.ImageID + } else { + log.Infof("====================================") + } + + return s, nil +} + +// CommandCommit commits collected changes +type CommandCommit struct{} + +func (c *CommandCommit) String() string { + return "Commit changes" +} + +func (c *CommandCommit) ShouldRun(b *Build) (bool, error) { + return b.state.GetCommits() != "", nil +} - container, err := builder.ensureContainer(volumeContainerName, containerConfig, strings.Join(dests, ",")) +func (c *CommandCommit) Execute(b *Build) (s State, err error) { + s = b.state + + commits := s.GetCommits() + if commits == "" { + return s, nil + } + + if s.ImageID == "" && !s.NoBaseImage { + 
return s, fmt.Errorf("Please provide a source image with `from` prior to commit") + } + + // TODO: ? + // if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") + + // TODO: verify that we need to check cache in commit only for + // a non-container actions + + if s.ContainerID == "" { + + // Check cache + var hit bool + s, hit, err = b.probeCache(s) if err != nil { - return err + return s, err + } + if hit { + return s, nil } - // Assing volume container to the list of volume mounts - for _, mount := range newVolumeMounts { - mount.containerID = container.ID + origCmd := s.Config.Cmd + s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} + + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err } + + s.Config.Cmd = origCmd } - mountIds := make([]string, len(newMounts)) + defer func(id string) { + s.Commits = []string{} + if err = b.client.RemoveContainer(id); err != nil { + log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) + } + }(s.ContainerID) - for i, mount := range newMounts { - builder.addMount(*mount) - mountIds[i] = mount.String() + var img *docker.Image + if img, err = b.client.CommitContainer(s, commits); err != nil { + return s, err } - // TODO: check is useCache flag enabled, so we have to make checksum of the directory + s.ContainerID = "" + s.ParentID = s.ImageID + s.ImageID = img.ID + s.ProducedImage = true - if err := builder.commitContainer("", builder.Config.Cmd, fmt.Sprintf("MOUNT %q", mountIds)); err != nil { - return err + if b.cache != nil { + b.cache.Put(s) } - return nil + // Store some stuff to the build + b.ProducedSize += img.Size + b.VirtualSize = img.VirtualSize + + return s, nil } -// cmdExport implements EXPORT command -// TODO: document behavior of cmdExport -func (builder *Builder) cmdExport(args []string, attributes map[string]bool, flags map[string]string, original string) error { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) - } - // If only one argument was given to EXPORT, use basename of a file - // EXPORT /my/dir/file.tar --> /EXPORT_VOLUME/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} +// CommandRun implements RUN +type CommandRun struct { + cfg ConfigCommand +} + +func (c *CommandRun) String() string { + return c.cfg.original +} + +func (c *CommandRun) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandRun) Execute(b *Build) (s State, err error) { + s = b.state + + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") } - dest := args[len(args)-1] // last one is always the dest + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) - // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
+ } - exportsContainerID, err := builder.makeExportsContainer() + s.Commit("RUN %q", cmd) + + // Check cache + s, hit, err := b.probeCache(s) if err != nil { - return err + return s, err + } + if hit { + return s, nil } - // prepare builder mount - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: exportsContainerID, - }) - defer builder.removeLastMount() + // TODO: test with ENTRYPOINT - cmdDestPath, err := util.ResolvePath(exportsVolume, dest) - if err != nil { - return fmt.Errorf("Invalid EXPORT destination: %s", dest) + // We run this command in the container using CMD + origCmd := s.Config.Cmd + s.Config.Cmd = cmd + + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err } - // TODO: rsync doesn't work as expected if ENTRYPOINT is inherited by parent image - // STILL RELEVANT? + if err = b.client.RunContainer(s.ContainerID, false); err != nil { + b.client.RemoveContainer(s.ContainerID) + return s, err + } - // build the command - cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} - cmd = append(cmd, args[0:len(args)-1]...) - cmd = append(cmd, cmdDestPath) + // Restore command after commit + s.Config.Cmd = origCmd + + return s, nil +} + +// CommandAttach implements ATTACH +type CommandAttach struct { + cfg ConfigCommand +} + +func (c *CommandAttach) String() string { + return c.cfg.original +} - // For caching - builder.addLabels(map[string]string{ - "rocker-exportsContainerId": exportsContainerID, - }) +func (c *CommandAttach) ShouldRun(b *Build) (bool, error) { + // TODO: skip attach? + return true, nil +} - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - builder.Config.Entrypoint = []string{} - }) - defer resetFunc() +func (c *CommandAttach) Execute(b *Build) (s State, err error) { + s = b.state - fmt.Fprintf(builder.OutStream, "[Rocker] run: %s\n", strings.Join(cmd, " ")) + // simply ignore this command if we don't wanna attach + if !b.cfg.Attach { + log.Infof("Skip ATTACH; use --attach option to get inside") + // s.SkipCommit() + return s, nil + } - if err := builder.runAndCommit(cmd, "import"); err != nil { - return err + if s.ImageID == "" && !s.NoBaseImage { + return s, fmt.Errorf("Please provide a source image with `FROM` prior to ATTACH") } - builder.lastExportImageID = builder.imageID + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if len(cmd) == 0 { + cmd = []string{"/bin/sh"} + } else if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
+ } + + // TODO: do s.commit unique + + // We run this command in the container using CMD + + // Backup the config so we can restore it later + origState := s + defer func() { + s = origState + }() + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + s.Config.Tty = true + s.Config.OpenStdin = true + s.Config.StdinOnce = true + s.Config.AttachStdin = true + s.Config.AttachStderr = true + s.Config.AttachStdout = true + + if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + if err = b.client.RunContainer(s.ContainerID, true); err != nil { + b.client.RemoveContainer(s.ContainerID) + return s, err + } + + return s, nil +} + +// CommandEnv implements ENV +type CommandEnv struct { + cfg ConfigCommand +} + +func (c *CommandEnv) String() string { + return c.cfg.original +} - return nil +func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { + return true, nil } -// cmdImport implements IMPORT command -// TODO: document behavior of cmdImport -func (builder *Builder) cmdImport(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { +func (c *CommandEnv) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) + return s, fmt.Errorf("ENV requires at least one argument") } - if builder.lastExportImageID == "" { - return fmt.Errorf("You have to EXPORT something first in order to: %s", original) + + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to ENV, too many args") } - if builder.exportsContainerID == "" { - return fmt.Errorf("Something went wrong, missing exports container: %s", original) + + commitStr := "ENV" + + for j := 0; j < len(args); j += 2 { + // name ==> args[j] + // value ==> args[j+1] + newVar := strings.Join(args[j:j+2], "=") + commitStr += " " + newVar + + gotOne := false + for i, envVar := range s.Config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + s.Config.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + s.Config.Env = append(s.Config.Env, newVar) + } } - // If only one argument was given to IMPORT, use the same path for destination - // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} + + s.Commit(commitStr) + + return s, nil +} + +// CommandLabel implements LABEL +type CommandLabel struct { + cfg ConfigCommand +} + +func (c *CommandLabel) String() string { + return c.cfg.original +} + +func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandLabel) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("LABEL requires at least one argument") } - dest := args[len(args)-1] // last one is always the dest - // prepare builder mount - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: builder.exportsContainerID, - }) - defer builder.removeLastMount() + if len(args)%2 != 0 { + // should never get here, but just in case + return s, fmt.Errorf("Bad input to LABEL, too many args") + } - // TODO: rsync doesn't work as expected if ENTRYPOINT is inherited by parent image - // STILL RELEVANT? 
+ commitStr := "LABEL" - cmd := []string{"/opt/rsync/bin/rsync", "-a"} - for _, arg := range args[0 : len(args)-1] { - argResolved, err := util.ResolvePath(exportsVolume, arg) - if err != nil { - return fmt.Errorf("Invalid IMPORT source: %s", arg) - } - cmd = append(cmd, argResolved) + if s.Config.Labels == nil { + s.Config.Labels = map[string]string{} } - cmd = append(cmd, dest) - // For caching - builder.addLabels(map[string]string{ - "rocker-lastExportImageId": builder.lastExportImageID, - }) + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + s.Config.Labels[args[j]] = args[j+1] + j++ + } - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - builder.Config.Entrypoint = []string{} - }) - defer resetFunc() + s.Commit(commitStr) - fmt.Fprintf(builder.OutStream, "[Rocker] run: %s\n", strings.Join(cmd, " ")) + return s, nil +} - return builder.runAndCommit(cmd, "import") +// CommandWorkdir implements WORKDIR +type CommandWorkdir struct { + cfg ConfigCommand } -// cmdTag implements TAG command -// TODO: document behavior of cmdTag -func (builder *Builder) cmdTag(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - builder.recentTags = []*imagename.ImageName{} - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +func (c *CommandWorkdir) String() string { + return c.cfg.original +} + +func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("WORKDIR requires exactly one argument") } - image := imagename.NewFromString(args[0]) - // Save rockerfile to label, sot it can be inspected later - if builder.AddMeta && !builder.metaAdded { - data := &RockerImageData{ - ImageName: image, - Rockerfile: builder.RockerfileContent, - Vars: builder.CliVars, - Properties: template.Vars{}, - } + workdir := c.cfg.args[0] - if hostname, _ := os.Hostname(); hostname != "" { - data.Properties["hostname"] = hostname - } - if user, _ := user.Current(); user != nil { - data.Properties["system_login"] = user.Username - data.Properties["system_user"] = user.Name - } + if !filepath.IsAbs(workdir) { + current := s.Config.WorkingDir + workdir = filepath.Join("/", current, workdir) + } - json, err := json.Marshal(data) - if err != nil { - return fmt.Errorf("Failed to marshal rocker data, error: %s", err) - } + s.Config.WorkingDir = workdir - builder.addLabels(map[string]string{ - "rocker-data": string(json), - }) + s.Commit(fmt.Sprintf("WORKDIR %v", workdir)) - fmt.Fprintf(builder.OutStream, "[Rocker] add rocker-data label\n") + return s, nil +} - if err := builder.commitContainer("", builder.Config.Cmd, "LABEL rocker-data"); err != nil { - return err - } +// CommandCmd implements CMD +type CommandCmd struct { + cfg ConfigCommand +} + +func (c *CommandCmd) String() string { + return c.cfg.original +} - builder.metaAdded = true +func (c *CommandCmd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandCmd) Execute(b *Build) (s State, err error) { + s = b.state + + cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + if !c.cfg.attrs["json"] { + cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
} - doTag := func(tag string) error { - img := &imagename.ImageName{ - Registry: image.Registry, - Name: image.Name, - Tag: tag, - } - builder.recentTags = append(builder.recentTags, img) + s.Config.Cmd = cmd - fmt.Fprintf(builder.OutStream, "[Rocker] Tag %.12s -> %s\n", builder.imageID, img) + s.Commit(fmt.Sprintf("CMD %q", cmd)) - err := builder.Docker.TagImage(builder.imageID, docker.TagImageOptions{ - Repo: img.NameWithRegistry(), - Tag: img.GetTag(), - Force: true, - }) - if err != nil { - return fmt.Errorf("Failed to set tag %s to image %s", img, builder.imageID) - } - return nil + if len(c.cfg.args) != 0 { + s.CmdSet = true } - // By default, tag with current branch name if tag is not specified - // do not use :latest unless it was set explicitly - if !image.HasTag() { - if builder.Vars.IsSet("branch") && builder.Vars["branch"].(string) != "" { - image.Tag = builder.Vars["branch"].(string) - } - // Additionally, tag image with current git sha - if builder.Vars.IsSet("commit") && builder.Vars["commit"] != "" { - if err := doTag(fmt.Sprintf("%.7s", builder.Vars["commit"])); err != nil { - return err - } - } + return s, nil +} + +// CommandEntrypoint implements ENTRYPOINT +type CommandEntrypoint struct { + cfg ConfigCommand +} + +func (c *CommandEntrypoint) String() string { + return c.cfg.original +} + +func (c *CommandEntrypoint) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { + s = b.state + + parsed := handleJSONArgs(c.cfg.args, c.cfg.attrs) + + switch { + case c.cfg.attrs["json"]: + // ENTRYPOINT ["echo", "hi"] + s.Config.Entrypoint = parsed + case len(parsed) == 0: + // ENTRYPOINT [] + s.Config.Entrypoint = nil + default: + // ENTRYPOINT echo hi + s.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} } - // Do the asked tag - if err := doTag(image.GetTag()); err != nil { - return err + s.Commit(fmt.Sprintf("ENTRYPOINT %q", s.Config.Entrypoint)) + + // TODO: test this + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !s.CmdSet { + s.Config.Cmd = nil } - // Optionally make a semver aliases - if _, ok := flags["semver"]; ok && image.HasTag() { - ver, err := NewSemver(image.GetTag()) - if err != nil { - return fmt.Errorf("--semver flag expects tag to be in semver format, error: %s", err) - } - // If the version is like 1.2.3-build512 we also want to alias 1.2.3 - if ver.HasSuffix() { - if err := doTag(fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch)); err != nil { - return err - } - } - if err := doTag(fmt.Sprintf("%d.%d.x", ver.Major, ver.Minor)); err != nil { - return err + return s, nil +} + +// CommandExpose implements EXPOSE +type CommandExpose struct { + cfg ConfigCommand +} + +func (c *CommandExpose) String() string { + return c.cfg.original +} + +func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandExpose) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("EXPOSE requires at least one argument") + } + + if s.Config.ExposedPorts == nil { + s.Config.ExposedPorts = map[docker.Port]struct{}{} + } + + ports, _, err := nat.ParsePortSpecs(c.cfg.args) + if err != nil { + return s, err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. 
This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + dockerPort := docker.Port(port) + if _, exists := s.Config.ExposedPorts[dockerPort]; !exists { + s.Config.ExposedPorts[dockerPort] = struct{}{} } - if err := doTag(fmt.Sprintf("%d.x", ver.Major)); err != nil { - return err + portList[i] = string(port) + i++ + } + sort.Strings(portList) + + message := fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")) + s.Commit(message) + + return s, nil +} + +// CommandVolume implements VOLUME +type CommandVolume struct { + cfg ConfigCommand +} + +func (c *CommandVolume) String() string { + return c.cfg.original +} + +func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandVolume) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("VOLUME requires at least one argument") + } + + if s.Config.Volumes == nil { + s.Config.Volumes = map[string]struct{}{} + } + for _, v := range c.cfg.args { + v = strings.TrimSpace(v) + if v == "" { + return s, fmt.Errorf("Volume specified can not be an empty string") } + s.Config.Volumes[v] = struct{}{} } - return nil + s.Commit(fmt.Sprintf("VOLUME %v", c.cfg.args)) + + return s, nil +} + +// CommandUser implements USER +type CommandUser struct { + cfg ConfigCommand +} + +func (c *CommandUser) String() string { + return c.cfg.original +} + +func (c *CommandUser) ShouldRun(b *Build) (bool, error) { + return true, nil } -// cmdPush implements PUSH command -// TODO: document behavior of cmdPush -func (builder *Builder) cmdPush(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if err := builder.cmdTag(args, attributes, flags, original); err != nil { - return fmt.Errorf("Failed to tag image, error: %s", err) +func (c *CommandUser) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) != 1 { + return s, fmt.Errorf("USER requires exactly one argument") } - if !builder.Push { - fmt.Fprintf(builder.OutStream, "[Rocker] *** just tagged; pass --push flag to actually push to a registry\n") - return nil + s.Config.User = c.cfg.args[0] + + s.Commit(fmt.Sprintf("USER %v", c.cfg.args)) + + return s, nil +} + +// CommandOnbuild implements ONBUILD +type CommandOnbuild struct { + cfg ConfigCommand +} + +func (c *CommandOnbuild) String() string { + return c.cfg.original +} + +func (c *CommandOnbuild) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return s, fmt.Errorf("ONBUILD requires at least one argument") } - for _, image := range builder.recentTags { - fmt.Fprintf(builder.OutStream, "[Rocker] Push %.12s -> %s\n", builder.imageID, image) + command := strings.ToUpper(strings.TrimSpace(c.cfg.args[0])) + switch command { + case "ONBUILD": + return s, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return s, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", command) + } - digest, err := builder.pushImage(*image) - if err != nil { - return err - } + orig := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(c.cfg.original, "") - if builder.ArtifactsPath != "" { - if err := os.MkdirAll(builder.ArtifactsPath, 0755); err != nil { - return fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", builder.ArtifactsPath, 
err) - } - filePath := filepath.Join(builder.ArtifactsPath, image.GetTag()) - lines := []string{ - fmt.Sprintf("Name: %s", image), - fmt.Sprintf("Tag: %s", image.GetTag()), - fmt.Sprintf("ImageID: %s", builder.imageID), - fmt.Sprintf("Digest: %s", digest), - fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), - } - content := []byte(strings.Join(lines, "\n") + "\n") + s.Config.OnBuild = append(s.Config.OnBuild, orig) + s.Commit(fmt.Sprintf("ONBUILD %s", orig)) - if err := ioutil.WriteFile(filePath, content, 0644); err != nil { - return fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) - } + return s, nil +} - fmt.Fprintf(builder.OutStream, "[Rocker] Save artifact file %s\n", filePath) - } +// CommandTag implements TAG +type CommandTag struct { + cfg ConfigCommand +} + +func (c *CommandTag) String() string { + return c.cfg.original +} + +func (c *CommandTag) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandTag) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("TAG requires exactly one argument") } - return nil + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot TAG on empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + + return b.state, nil } -// cmdRequire implements REQUIRE command -// TODO: document behavior of cmdRequire -func (builder *Builder) cmdRequire(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// CommandPush implements PUSH +type CommandPush struct { + cfg ConfigCommand +} + +func (c *CommandPush) String() string { + return c.cfg.original +} + +func (c *CommandPush) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandPush) Execute(b *Build) (State, error) { + if len(c.cfg.args) != 1 { + return b.state, fmt.Errorf("PUSH requires exactly one argument") } - for _, requireVar := range args { - if !builder.Vars.IsSet(requireVar) { - return fmt.Errorf("Var $%s is required but not set", requireVar) - } + + if b.state.ImageID == "" { + return b.state, fmt.Errorf("Cannot PUSH empty image") + } + + if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { + return b.state, err + } + + if !b.cfg.Push { + log.Infof("| Don't push. 
Pass --push flag to actually push to the registry") + return b.state, nil + } + + if err := b.client.PushImage(c.cfg.args[0]); err != nil { + return b.state, err } - return nil + + return b.state, nil } -// cmdVar implements VAR command -// it is deprecated due to templating functionality, see: https://github.com/grammarly/rocker#templating -func (builder *Builder) cmdVar(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) +// CommandCopy implements COPY +type CommandCopy struct { + cfg ConfigCommand +} + +func (c *CommandCopy) String() string { + return c.cfg.original +} + +func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandCopy) Execute(b *Build) (State, error) { + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("COPY requires at least two arguments") + } + return copyFiles(b, c.cfg.args, "COPY") +} + +// CommandAdd implements ADD +// For now it is an alias of COPY, but later will add urls and archives to it +type CommandAdd struct { + cfg ConfigCommand +} + +func (c *CommandAdd) String() string { + return c.cfg.original +} + +func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandAdd) Execute(b *Build) (State, error) { + if len(c.cfg.args) < 2 { + return b.state, fmt.Errorf("ADD requires at least two arguments") + } + return copyFiles(b, c.cfg.args, "ADD") +} + +// CommandMount implements MOUNT +type CommandMount struct { + cfg ConfigCommand +} + +func (c *CommandMount) String() string { + return c.cfg.original +} + +func (c *CommandMount) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandMount) Execute(b *Build) (s State, err error) { + + s = b.state + + if len(c.cfg.args) == 0 { + return b.state, fmt.Errorf("MOUNT requires at least one argument") } - for i := 0; i < len(args); i += 2 { - key := args[i] - value := args[i+1] - if !builder.Vars.IsSet(key) { - builder.Vars[key] = value + + commitIds := []string{} + + for _, arg := range c.cfg.args { + + switch strings.Contains(arg, ":") { + // MOUNT src:dest + case true: + var ( + pair = strings.SplitN(arg, ":", 2) + src = pair[0] + dest = pair[1] + err error + ) + + // Process relative paths in volumes + if strings.HasPrefix(src, "~") { + src = strings.Replace(src, "~", os.Getenv("HOME"), 1) + } + if !path.IsAbs(src) { + src = path.Join(b.cfg.ContextDir, src) + } + + if src, err = b.client.ResolveHostPath(src); err != nil { + return s, err + } + + if s.HostConfig.Binds == nil { + s.HostConfig.Binds = []string{} + } + + s.HostConfig.Binds = append(s.HostConfig.Binds, src+":"+dest) + commitIds = append(commitIds, arg) + + // MOUNT dir + case false: + name, err := b.getVolumeContainer(arg) + if err != nil { + return s, err + } + + if s.HostConfig.VolumesFrom == nil { + s.HostConfig.VolumesFrom = []string{} + } + + s.HostConfig.VolumesFrom = append(s.HostConfig.VolumesFrom, name) + commitIds = append(commitIds, name+":"+arg) } } - return nil + + s.Commit(fmt.Sprintf("MOUNT %q", commitIds)) + + return s, nil +} + +// CommandExport implements EXPORT +type CommandExport struct { + cfg ConfigCommand +} + +func (c *CommandExport) String() string { + return c.cfg.original } -// cmdInclude implements INCLUDE command -// TODO: document behavior of cmdInclude -func (builder *Builder) cmdInclude(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { +func (c 
*CommandExport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandExport) Execute(b *Build) (s State, err error) { + + s = b.state + args := c.cfg.args + if len(args) == 0 { - return fmt.Errorf("Command is missing value: %s", original) + return s, fmt.Errorf("EXPORT requires at least one argument") } - module := args[0] - contextDir := filepath.Dir(builder.Rockerfile) - resultPath := filepath.Clean(path.Join(contextDir, module)) + // If only one argument was given to EXPORT, use basename of a file + // EXPORT /my/dir/file.tar --> /EXPORT_VOLUME/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} + } - // TODO: protect against going out of working directory? + src := args[0 : len(args)-1] + dest := args[len(args)-1] // last one is always the dest - stat, err := os.Stat(resultPath) + // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir + // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir + // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir + + exportsContainerID, err := b.getExportsContainer() if err != nil { - return err - } - if !stat.Mode().IsRegular() { - return fmt.Errorf("Expected included resource to be a regular file: %s (%s)", module, original) + return s, err } - fd, err := os.Open(resultPath) + // build the command + cmdDestPath, err := util.ResolvePath(ExportsPath, dest) if err != nil { - return err + return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) } - defer fd.Close() - includedNode, err := parser.Parse(fd) + s.Commit("EXPORT %q to %.12s:%s", src, exportsContainerID, dest) + + s, hit, err := b.probeCache(s) if err != nil { - return err + return s, err } - - for _, node := range includedNode.Children { - if node.Value == "include" { - return fmt.Errorf("Nesting includes is not allowed: \"%s\" in %s", original, resultPath) - } + if hit { + b.exports = append(b.exports, s.ExportsID) + return s, nil } - // inject included commands info root node at current execution position - after := append(includedNode.Children, builder.rootNode.Children[builder.i+1:]...) - builder.rootNode.Children = append(builder.rootNode.Children[:builder.i], after...) - builder.i-- + // Remember original stuff so we can restore it when we finished + var exportsId string + origState := s - return nil -} + defer func() { + s = origState + s.ExportsID = exportsId + b.exports = append(b.exports, exportsId) + }() -// cmdAttach implements ATTACH command -// TODO: document behavior of cmdAttach -func (builder *Builder) cmdAttach(args []string, attributes map[string]bool, flags map[string]string, original string) (err error) { - // simply ignore this command if we don't wanna attach - if !builder.Attach { - fmt.Fprintf(builder.OutStream, "[Rocker] Skipping ATTACH; use --attach option to get inside\n") - return nil + // Append exports container as a volume + s.HostConfig.VolumesFrom = []string{exportsContainerID} + + cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") } - cmd := handleJSONArgs(args, attributes) + cmd = append(cmd, src...) + cmd = append(cmd, cmdDestPath) - if len(cmd) > 0 { - if !attributes["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
- } - } else { - cmd = builder.Config.Cmd + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + + if exportsId, err = b.client.CreateContainer(s); err != nil { + return s, err } + defer b.client.RemoveContainer(exportsId) + + log.Infof("| Running in %.12s: %s", exportsId, strings.Join(cmd, " ")) - // Mount exports container if there is one - if builder.exportsContainerID != "" { - builder.addMount(builderMount{ - dest: exportsVolume, - containerID: builder.exportsContainerID, - }) - defer builder.removeLastMount() + if err = b.client.RunContainer(exportsId, false); err != nil { + return s, err } - var name string - if _, ok := flags["name"]; ok { - if flags["name"] == "" { - return fmt.Errorf("flag --name needs a value: %s", original) - } - name = flags["name"] + return s, nil +} + +// CommandImport implements IMPORT +type CommandImport struct { + cfg ConfigCommand +} + +func (c *CommandImport) String() string { + return c.cfg.original +} + +func (c *CommandImport) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandImport) Execute(b *Build) (s State, err error) { + s = b.state + args := c.cfg.args + + if len(args) == 0 { + return s, fmt.Errorf("IMPORT requires at least one argument") + } + if len(b.exports) == 0 { + return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") } - if _, ok := flags["hostname"]; ok && flags["hostname"] == "" { - return fmt.Errorf("flag --hostname needs a value: %s", original) + // TODO: EXPORT and IMPORT cache is not invalidated properly in between + // different tracks of the same build. The EXPORT may be cached + // because it was built earlier with the same prerequisites, but the actual + // data in the exports container may be from the latest EXPORT of different + // build. So we need to prefix ~/.rocker_exports dir with some id somehow. 
+ + log.Infof("| Import from %s", b.exportsContainerName()) + + // If only one argument was given to IMPORT, use the same path for destination + // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar + if len(args) < 2 { + args = []string{args[0], "/"} } + dest := args[len(args)-1] // last one is always the dest + src := []string{} - // Configure container temporarily, only for this execution - resetFunc := builder.temporaryConfig(func() { - if _, ok := flags["hostname"]; ok { - builder.Config.Hostname = flags["hostname"] + for _, arg := range args[0 : len(args)-1] { + argResolved, err := util.ResolvePath(ExportsPath, arg) + if err != nil { + return s, fmt.Errorf("Invalid IMPORT source: %s", arg) } - builder.Config.Cmd = cmd - builder.Config.Entrypoint = []string{} - builder.Config.Tty = true - builder.Config.OpenStdin = true - builder.Config.StdinOnce = true - builder.Config.AttachStdin = true - builder.Config.AttachStderr = true - builder.Config.AttachStdout = true - }) - defer resetFunc() - - containerID, err := builder.createContainer(name) + src = append(src, argResolved) + } + + sort.Strings(b.exports) + s.Commit("IMPORT %q : %q %s", b.exports, src, dest) + + // Check cache + s, hit, err := b.probeCache(s) if err != nil { - return fmt.Errorf("Failed to create container, error: %s", err) + return s, err + } + if hit { + return s, nil } + + // Remember original stuff so we can restore it when we finished + origState := s + + var importID string + defer func() { - if err2 := builder.removeContainer(containerID); err2 != nil && err == nil { - err = err2 - } + s = origState + s.ContainerID = importID }() - if err := builder.runContainerAttachStdin(containerID, true); err != nil { - return fmt.Errorf("Failed to run attached container %s, error: %s", containerID, err) + cmd := []string{"/opt/rsync/bin/rsync", "-a"} + + if b.cfg.Verbose { + cmd = append(cmd, "--verbose") } - return nil + cmd = append(cmd, src...) + cmd = append(cmd, dest) + + s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} + s.HostConfig.VolumesFrom = []string{b.exportsContainerName()} + + if importID, err = b.client.CreateContainer(s); err != nil { + return s, err + } + + log.Infof("| Running in %.12s: %s", importID, strings.Join(cmd, " ")) + + if err = b.client.RunContainer(importID, false); err != nil { + return s, err + } + + // TODO: if b.exportsCacheBusted and IMPORT cache was invalidated, + // CommitCommand then caches it anyway. + + return s, nil +} + +// CommandOnbuildWrap wraps ONBUILD command +type CommandOnbuildWrap struct { + cmd Command +} + +func (c *CommandOnbuildWrap) String() string { + return "ONBUILD " + c.cmd.String() +} + +func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { + return true, nil +} + +func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { + return c.cmd.Execute(b) } diff --git a/src/rocker/build2/commands_test.go b/src/rocker/build/commands_test.go similarity index 99% rename from src/rocker/build2/commands_test.go rename to src/rocker/build/commands_test.go index 352acfd7..78a156bb 100644 --- a/src/rocker/build2/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package build2 +package build import ( "fmt" diff --git a/src/rocker/build2/compare.go b/src/rocker/build/compare.go similarity index 99% rename from src/rocker/build2/compare.go rename to src/rocker/build/compare.go index 895f3326..1d94046b 100644 --- a/src/rocker/build2/compare.go +++ b/src/rocker/build/compare.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import "github.com/fsouza/go-dockerclient" diff --git a/src/rocker/build/config.go b/src/rocker/build/config.go deleted file mode 100644 index 04d9cf80..00000000 --- a/src/rocker/build/config.go +++ /dev/null @@ -1,95 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -import "github.com/fsouza/go-dockerclient" - -// CompareConfigs compares two Config struct. Does not compare the "Image" nor "Hostname" fields -// If OpenStdin is set, then it differs -func CompareConfigs(a, b *docker.Config) bool { - // Experimental: do not consider rocker-data labels when comparing - if _, ok := a.Labels["rocker-data"]; ok { - tmp := a.Labels["rocker-data"] - delete(a.Labels, "rocker-data") - defer func() { a.Labels["rocker-data"] = tmp }() - } - if _, ok := b.Labels["rocker-data"]; ok { - tmp := b.Labels["rocker-data"] - delete(b.Labels, "rocker-data") - defer func() { b.Labels["rocker-data"] = tmp }() - } - - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty { - return false - } - - if len(a.Cmd) != len(b.Cmd) || - len(a.Env) != len(b.Env) || - len(a.Labels) != len(b.Labels) || - len(a.PortSpecs) != len(b.PortSpecs) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for k, v := range a.Labels { - if v != b.Labels[k] { - return false - } - } - for i := 0; i < len(a.PortSpecs); i++ { - if a.PortSpecs[i] != b.PortSpecs[i] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -} diff --git a/src/rocker/build2/container_formatter.go b/src/rocker/build/container_formatter.go similarity index 98% rename from src/rocker/build2/container_formatter.go rename to src/rocker/build/container_formatter.go index 9509416e..b3f79439 100644 --- a/src/rocker/build2/container_formatter.go +++ b/src/rocker/build/container_formatter.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package build2 +package build import ( "fmt" diff --git a/src/rocker/build/containers.go b/src/rocker/build/containers.go deleted file mode 100644 index cdb057af..00000000 --- a/src/rocker/build/containers.go +++ /dev/null @@ -1,292 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -import ( - "fmt" - "io" - "os" - "os/signal" - - "rocker/util" - - "github.com/docker/docker/pkg/term" - "github.com/fsouza/go-dockerclient" -) - -func (builder *Builder) runAndCommit(cmd []string, comment string) error { - // set Cmd manually, this is special case only for Dockerfiles - origCmd := builder.Config.Cmd - clearFunc := builder.temporaryCmd(cmd) - defer clearFunc() - - hit, err := builder.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - containerID, err := builder.createContainer("") - if err != nil { - return fmt.Errorf("Failed to create container, error: %s", err) - } - defer func() { - if err2 := builder.removeContainer(containerID); err2 != nil && err == nil { - err = err2 - } - }() - - if err := builder.runContainer(containerID); err != nil { - return fmt.Errorf("Failed to run container %s, error: %s", containerID, err) - } - - return builder.commitContainer(containerID, origCmd, comment) -} - -func (builder *Builder) createContainer(name string) (string, error) { - volumesFrom := builder.getMountContainerIds() - binds := builder.getBinds() - - builder.Config.Image = builder.imageID - - opts := docker.CreateContainerOptions{ - Name: name, - Config: builder.Config, - HostConfig: &docker.HostConfig{ - Binds: binds, - VolumesFrom: volumesFrom, - }, - } - - container, err := builder.Docker.CreateContainer(opts) - if err != nil { - return "", err - } - - fmt.Fprintf(builder.OutStream, "[Rocker] ---> Running in %.12s (image id = %.12s)\n", container.ID, builder.imageID) - - return container.ID, nil -} - -func (builder *Builder) removeContainer(containerID string) error { - fmt.Fprintf(builder.OutStream, "[Rocker] Removing intermediate container %.12s\n", containerID) - // TODO: always force? 
- return builder.Docker.RemoveContainer(docker.RemoveContainerOptions{ID: containerID, Force: true}) -} - -func (builder *Builder) runContainer(containerID string) error { - return builder.runContainerAttachStdin(containerID, false) -} - -func (builder *Builder) runContainerAttachStdin(containerID string, attachStdin bool) error { - success := make(chan struct{}) - - attachOpts := docker.AttachToContainerOptions{ - Container: containerID, - OutputStream: util.PrefixPipe("[Docker] ", builder.OutStream), - ErrorStream: util.PrefixPipe("[Docker] ", builder.OutStream), - Stdout: true, - Stderr: true, - Stream: true, - Success: success, - } - - if attachStdin { - if !builder.isTerminalIn { - return fmt.Errorf("Cannot attach to a container on non tty input") - } - oldState, err := term.SetRawTerminal(builder.fdIn) - if err != nil { - return err - } - defer term.RestoreTerminal(builder.fdIn, oldState) - - attachOpts.InputStream = readerVoidCloser{builder.InStream} - attachOpts.OutputStream = builder.OutStream - attachOpts.ErrorStream = builder.OutStream - attachOpts.Stdin = true - attachOpts.RawTerminal = true - } - - finished := make(chan struct{}, 1) - - go func() { - if err := builder.Docker.AttachToContainer(attachOpts); err != nil { - select { - case <-finished: - // Ignore any attach errors when we have finished already. - // It may happen if we attach stdin, then container exit, but then there is other input from stdin continues. - // This is the case when multiple ATTACH command are used in a single Rockerfile. - // The problem though is that we cannot close stdin, to have it available for the subsequent ATTACH; - // therefore, hijack goroutine from the previous ATTACH will hang until the input received and then - // it will fire an error. - // It's ok for `rocker` since it is not a daemon, but rather a one-off command. - // - // Also, there is still a problem that `rocker` loses second character from the Stdin in a second ATTACH. - // But let's consider it a corner case. - default: - // Print the error. 
We cannot return it because the main routine is handing on WaitContaienr - fmt.Fprintf(builder.OutStream, "Got error while attaching to container %s: %s\n", containerID, err) - } - } - }() - - success <- <-success - - if err := builder.Docker.StartContainer(containerID, &docker.HostConfig{}); err != nil { - return err - } - - if attachStdin { - if err := builder.monitorTtySize(containerID); err != nil { - return fmt.Errorf("Failed to monitor TTY size for container %s, error: %s", containerID, err) - } - } - - sigch := make(chan os.Signal, 1) - signal.Notify(sigch, os.Interrupt) - - errch := make(chan error) - - go func() { - statusCode, err := builder.Docker.WaitContainer(containerID) - if err != nil { - errch <- err - } else if statusCode != 0 { - errch <- fmt.Errorf("Failed to run container, exit with code %d", statusCode) - } - errch <- nil - return - }() - - select { - case err := <-errch: - // indicate 'finished' so the `attach` goroutine will not give any errors - finished <- struct{}{} - if err != nil { - return err - } - case <-sigch: - fmt.Fprintf(builder.OutStream, "[Rocker] Received SIGINT, remove current container...\n") - if err := builder.removeContainer(containerID); err != nil { - fmt.Fprintf(builder.OutStream, "[Rocker] Failed to remove container: %s\n", err) - } - // TODO: send signal to builder.Build() and have a proper cleanup - os.Exit(2) - } - - return nil -} - -func (builder *Builder) commitContainer(containerID string, autoCmd []string, comment string) (err error) { - - if containerID == "" { - clearFunc := builder.temporaryCmd([]string{"/bin/sh", "-c", "#(nop) " + comment}) - defer clearFunc() - - hit, err := builder.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - containerID, err = builder.createContainer("") - if err != nil { - return err - } - - defer func() { - if err2 := builder.removeContainer(containerID); err2 != nil && err == nil { - err = err2 - } - }() - } - - // clone the struct - autoConfig := *builder.Config - autoConfig.Cmd = autoCmd - - commitOpts := docker.CommitContainerOptions{ - Container: containerID, - Message: "", - Run: &autoConfig, - } - - image, err := builder.Docker.CommitContainer(commitOpts) - if err != nil { - return err - } - - builder.imageID = image.ID - - return nil -} - -func (builder *Builder) ensureContainer(containerName string, config *docker.Config, purpose string) (*docker.Container, error) { - // Check if container exists - container, err := builder.Docker.InspectContainer(containerName) - - // No data volume container for this build, create it - if _, ok := err.(*docker.NoSuchContainer); ok { - - if err := builder.ensureImage(config.Image, purpose); err != nil { - return container, fmt.Errorf("Failed to check image %s, error: %s", config.Image, err) - } - - fmt.Fprintf(builder.OutStream, "[Rocker] Create container: %s for %s\n", containerName, purpose) - - createOpts := docker.CreateContainerOptions{ - Name: containerName, - Config: config, - } - - container, err = builder.Docker.CreateContainer(createOpts) - if err != nil { - return container, fmt.Errorf("Failed to create container %s from image %s, error: %s", containerName, config.Image, err) - } - } else if err == nil { - fmt.Fprintf(builder.OutStream, "[Rocker] Use existing container: %s for %s\n", containerName, purpose) - } - - return container, err -} - -// readerVoidCloser is a hack of the improved go-dockerclient's hijacking behavior -// It simply wraps io.Reader (os.Stdin in our case) and discards any Close() call. 
-// -// It's important because we don't want to close os.Stdin for two reasons: -// 1. We need to restore the terminal back from the raw mode after ATTACH -// 2. There can be other ATTACH instructions for which we need an open stdin -// -// See additional notes in the runContainerAttachStdin() function -type readerVoidCloser struct { - reader io.Reader -} - -// Read reads from current reader -func (r readerVoidCloser) Read(p []byte) (int, error) { - return r.reader.Read(p) -} - -// Close is a viod function, does nothing -func (r readerVoidCloser) Close() error { - return nil -} diff --git a/src/rocker/build2/copy.go b/src/rocker/build/copy.go similarity index 99% rename from src/rocker/build2/copy.go rename to src/rocker/build/copy.go index 89810d82..653af545 100644 --- a/src/rocker/build2/copy.go +++ b/src/rocker/build/copy.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "archive/tar" diff --git a/src/rocker/build2/copy_test.go b/src/rocker/build/copy_test.go similarity index 99% rename from src/rocker/build2/copy_test.go rename to src/rocker/build/copy_test.go index 0ba7a14c..8e932026 100644 --- a/src/rocker/build2/copy_test.go +++ b/src/rocker/build/copy_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "bytes" diff --git a/src/rocker/build2/dockerignore.go b/src/rocker/build/dockerignore.go similarity index 99% rename from src/rocker/build2/dockerignore.go rename to src/rocker/build/dockerignore.go index c29f2046..265ff5fb 100644 --- a/src/rocker/build2/dockerignore.go +++ b/src/rocker/build/dockerignore.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "bufio" diff --git a/src/rocker/build2/dockerignore_test.go b/src/rocker/build/dockerignore_test.go similarity index 98% rename from src/rocker/build2/dockerignore_test.go rename to src/rocker/build/dockerignore_test.go index c673c0f3..082395e8 100644 --- a/src/rocker/build2/dockerignore_test.go +++ b/src/rocker/build/dockerignore_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "strings" diff --git a/src/rocker/build/imagedata.go b/src/rocker/build/imagedata.go deleted file mode 100644 index ae27acef..00000000 --- a/src/rocker/build/imagedata.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package build - -import ( - "encoding/json" - "fmt" - "log" - "rocker/imagename" - "rocker/template" - "time" - - "github.com/fatih/color" -) - -// RockerImageData provides metadata for images built with Rocker -// It can be attached to a container label called "rocker-data" if -// --meta flag was given to `rocker build` -type RockerImageData struct { - ImageName *imagename.ImageName - Rockerfile string - Vars template.Vars - Properties template.Vars - Created time.Time -} - -// PrettyString returns RockerImageData as a printable string -func (data *RockerImageData) PrettyString() string { - prettyVars, err := json.MarshalIndent(data.Vars, "", " ") - if err != nil { - log.Fatal(err) - } - prettyProps, err := json.MarshalIndent(data.Properties, "", " ") - if err != nil { - log.Fatal(err) - } - green := color.New(color.FgGreen).SprintfFunc() - yellow := color.New(color.FgYellow).SprintfFunc() - sep := "=======================================================\n" - - res := fmt.Sprintf("%s%s\n", green(sep), - green("Image: %s", data.ImageName.String())) - - if !data.Created.IsZero() { - res = fmt.Sprintf("%sCreated: %s\n", res, data.Created.Format(time.RFC850)) - } - - if data.Properties != nil { - res = fmt.Sprintf("%sProperties: %s\n", res, prettyProps) - } - - if data.Vars != nil { - res = fmt.Sprintf("%sVars: %s\n", res, prettyVars) - } - - if data.Rockerfile != "" { - res = fmt.Sprintf("%s%s\n%s\n%s\n%s", res, yellow("Rockerfile:"), yellow(sep), data.Rockerfile, yellow(sep)) - } - - return res -} diff --git a/src/rocker/build/internals.go b/src/rocker/build/internals.go deleted file mode 100644 index 2a6f207e..00000000 --- a/src/rocker/build/internals.go +++ /dev/null @@ -1,477 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "regexp" - "strings" - "time" - - "rocker/imagename" - "rocker/parser" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/fsouza/go-dockerclient" -) - -var ( - captureImageID = regexp.MustCompile("Successfully built ([a-f0-9]{12})") - captureDigest = regexp.MustCompile("digest:\\s*(sha256:[a-f0-9]{64})") -) - -func (builder *Builder) checkDockerignore() (err error) { - ignoreLines := []string{ - ".dockerignore", - builder.getTmpPrefix() + "*", - builder.rockerfileRelativePath(), - } - dockerignoreFile := path.Join(builder.ContextDir, ".dockerignore") - - // everything is easy, we just need to create one - if _, err := os.Stat(dockerignoreFile); os.IsNotExist(err) { - fmt.Fprintf(builder.OutStream, "[Rocker] Create .dockerignore in context directory\n") - newLines := append([]string{ - "# This file is automatically generated by Rocker, please keep it", - }, ignoreLines...) 
- return ioutil.WriteFile(dockerignoreFile, []byte(strings.Join(newLines, "\n")+"\n"), 0644) - } - - // more difficult, find missing lines - file, err := os.Open(dockerignoreFile) - if err != nil { - return err - } - defer file.Close() - - // read current .dockerignore and filter those ignoreLines which are already there - scanner := bufio.NewScanner(file) - newLines := []string{} - for scanner.Scan() { - currentLine := scanner.Text() - newLines = append(newLines, currentLine) - if currentLine == ".git" { - builder.gitIgnored = true - } - for i, ignoreLine := range ignoreLines { - if ignoreLine == currentLine { - ignoreLines = append(ignoreLines[:i], ignoreLines[i+1:]...) - break - } - } - } - - if err := scanner.Err(); err != nil { - return err - } - - // if we have still something to add - do it - if len(ignoreLines) > 0 { - newLines = append(newLines, ignoreLines...) - fmt.Fprintf(builder.OutStream, "[Rocker] Add %d lines to .dockerignore\n", len(ignoreLines)) - return ioutil.WriteFile(dockerignoreFile, []byte(strings.Join(newLines, "\n")+"\n"), 0644) - } - - return nil -} - -func (builder *Builder) runDockerfile() (err error) { - if len(builder.dockerfile.Children) == 0 { - return nil - } - - // HACK: skip if all we have is "FROM scratch", we need to do something - // to produce actual layer with ID, so create dummy LABEL layer - // maybe there is a better solution, but keep this for a while - if len(builder.dockerfile.Children) == 1 && - builder.dockerfile.Children[0].Value == "from" && - builder.dockerfile.Children[0].Next.Value == "scratch" { - - builder.dockerfile.Children = append(builder.dockerfile.Children, &parser.Node{ - Value: "label", - Next: &parser.Node{ - Value: "ROCKER_SCRATCH=1", - }, - }) - } - - pull := builder.Pull - - // missing from, use latest image sha - if builder.dockerfile.Children[0].Value != "from" { - if builder.imageID == "" { - return fmt.Errorf("Missing initial FROM instruction") - } - fromNode := &parser.Node{ - Value: "from", - Next: &parser.Node{ - Value: builder.imageID, - }, - } - pull = false - builder.dockerfile.Children = append([]*parser.Node{fromNode}, builder.dockerfile.Children...) 
- } - - // Write Dockerfile to a context - dockerfileName := builder.dockerfileName() - dockerfilePath := path.Join(builder.ContextDir, dockerfileName) - - dockerfileContent, err := RockerfileAstToString(builder.dockerfile) - if err != nil { - return err - } - - if err := ioutil.WriteFile(dockerfilePath, []byte(dockerfileContent), 0644); err != nil { - return err - } - defer os.Remove(dockerfilePath) - - // TODO: here we can make a hint to a user, if the context directory is very large, - // suggest to add some stuff to .dockerignore, etc - - pipeReader, pipeWriter := io.Pipe() - - var buf bytes.Buffer - outStream := io.MultiWriter(pipeWriter, &buf) - - // TODO: consider ForceRmTmpContainer: true - opts := docker.BuildImageOptions{ - Dockerfile: dockerfileName, - OutputStream: outStream, - ContextDir: builder.ContextDir, - NoCache: !builder.UtilizeCache, - Auth: *builder.Auth, - Pull: pull, - RawJSONStream: true, - } - - errch := make(chan error) - - go func() { - err := builder.Docker.BuildImage(opts) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return fmt.Errorf("Failed to process json stream error: %s", err) - } - - if err := <-errch; err != nil { - return fmt.Errorf("Failed to build image: %s", err) - } - - // It is the best way to have built image id so far - // The other option would be to tag the image, and then remove the tag - // http://stackoverflow.com/questions/19776308/get-image-id-from-image-created-via-remote-api - matches := captureImageID.FindStringSubmatch(buf.String()) - if len(matches) == 0 { - return fmt.Errorf("Couldn't find image id out of docker build output") - } - imageID := matches[1] - - // Retrieve image id - image, err := builder.Docker.InspectImage(imageID) - if err != nil { - // fix go-dockerclient non descriptive error - if err.Error() == "no such image" { - err = fmt.Errorf("No such image: %s", imageID) - } - return err - } - - builder.imageID = image.ID - builder.Config = image.Config - - // clean it up - builder.dockerfile = &parser.Node{} - - return nil -} - -func (builder *Builder) addLabels(labels map[string]string) { - if builder.Config.Labels == nil { - builder.Config.Labels = map[string]string{} - } - for k, v := range labels { - builder.Config.Labels[k] = v - } -} - -func (builder *Builder) temporaryCmd(cmd []string) func() { - origCmd := builder.Config.Cmd - builder.Config.Cmd = cmd - return func() { - builder.Config.Cmd = origCmd - } -} - -func (builder *Builder) temporaryConfig(fn func()) func() { - // actually copy the whole config - origConfig := *builder.Config - fn() - return func() { - builder.Config = &origConfig - } -} - -func (builder *Builder) probeCache() (bool, error) { - if !builder.UtilizeCache || builder.cacheBusted { - return false, nil - } - - cache, err := builder.imageGetCached(builder.imageID, builder.Config) - if err != nil { - return false, err - } - if cache == nil { - builder.cacheBusted = true - return false, nil - } - - fmt.Fprintf(builder.OutStream, "[Rocker] ---> Using cache\n") - - builder.imageID = cache.ID - return true, nil -} - -func (builder *Builder) imageGetCached(imageID string, config *docker.Config) (*docker.Image, error) { - // Retrieve all images and cache, because it might be a heavy operation - if builder.imagesCache == nil { - var err error - if builder.imagesCache, err 
= builder.Docker.ListImages(docker.ListImagesOptions{All: true}); err != nil { - return nil, err - } - } - - var siblings []string - for _, img := range builder.imagesCache { - if img.ParentID != imageID { - continue - } - siblings = append(siblings, img.ID) - } - - // Loop on the children of the given image and check the config - var match *docker.Image - - if len(siblings) == 0 { - return match, nil - } - - // TODO: ensure goroutines die if return abnormally - - ch := make(chan *docker.Image) - errch := make(chan error) - numResponses := 0 - - for _, siblingID := range siblings { - go func(siblingID string) { - image, err := builder.Docker.InspectImage(siblingID) - if err != nil { - errch <- err - return - } - ch <- image - }(siblingID) - } - - for { - select { - case image := <-ch: - if CompareConfigs(&image.ContainerConfig, config) { - if match == nil || match.Created.Before(image.Created) { - match = image - } - } - - numResponses++ - - if len(siblings) == numResponses { - return match, nil - } - - case err := <-errch: - return nil, err - - case <-time.After(10 * time.Second): - // TODO: return "cache didn't hit"? - return nil, fmt.Errorf("Timeout while fetching cached images") - } - } -} - -func (builder *Builder) ensureImage(imageName string, purpose string) error { - _, err := builder.Docker.InspectImage(imageName) - if err != nil && err.Error() == "no such image" { - fmt.Fprintf(builder.OutStream, "[Rocker] Pulling image: %s for %s\n", imageName, purpose) - - image := imagename.NewFromString(imageName) - - pipeReader, pipeWriter := io.Pipe() - - pullOpts := docker.PullImageOptions{ - Repository: image.NameWithRegistry(), - Registry: image.Registry, - Tag: image.GetTag(), - OutputStream: pipeWriter, - RawJSONStream: true, - } - - errch := make(chan error) - - go func() { - err := builder.Docker.PullImage(pullOpts, *builder.Auth) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) - } - - if err := <-errch; err != nil { - return fmt.Errorf("Failed to pull image: %s, error: %s", image, err) - } - } else if err != nil { - return err - } - return nil -} - -func (builder *Builder) pushImage(image imagename.ImageName) (digest string, err error) { - - var ( - pipeReader, pipeWriter = io.Pipe() - errch = make(chan error) - - buf bytes.Buffer - outStream = io.MultiWriter(pipeWriter, &buf) - ) - - go func() { - err := builder.Docker.PushImage(docker.PushImageOptions{ - Name: image.NameWithRegistry(), - Tag: image.GetTag(), - Registry: image.Registry, - OutputStream: outStream, - RawJSONStream: true, - }, *builder.Auth) - - if err := pipeWriter.Close(); err != nil { - fmt.Fprintf(builder.OutStream, "pipeWriter.Close() err: %s\n", err) - } - - errch <- err - }() - - if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, builder.OutStream, builder.fdOut, builder.isTerminalOut); err != nil { - return "", fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err) - } - - if err := <-errch; err != nil { - return "", fmt.Errorf("Failed to push image: %s, error: %s", image, err) - } - - // It is the best way to have pushed image digest so far - matches := captureDigest.FindStringSubmatch(buf.String()) - if len(matches) > 0 { - digest = matches[1] - } - - return 
digest, nil -} - -func (builder *Builder) makeExportsContainer() (string, error) { - if builder.exportsContainerID != "" { - return builder.exportsContainerID, nil - } - exportsContainerName := builder.exportsContainerName() - - containerConfig := &docker.Config{ - Image: rsyncImage, - Volumes: map[string]struct{}{ - "/opt/rsync/bin": struct{}{}, - exportsVolume: struct{}{}, - }, - Labels: map[string]string{ - "Rockerfile": builder.Rockerfile, - "ImageId": builder.imageID, - }, - } - - container, err := builder.ensureContainer(exportsContainerName, containerConfig, "exports") - if err != nil { - return "", err - } - - builder.exportsContainerID = container.ID - - return container.ID, nil -} - -func (builder *Builder) getMountContainerIds() []string { - containerIds := make(map[string]struct{}) - for _, mount := range builder.mounts { - if mount.containerID != "" { - containerIds[mount.containerID] = struct{}{} - } - } - result := []string{} - for containerID := range containerIds { - result = append(result, containerID) - } - return result -} - -func (builder *Builder) getAllMountContainerIds() []string { - containerIds := make(map[string]struct{}) - for _, mount := range builder.allMounts { - if mount.containerID != "" { - containerIds[mount.containerID] = struct{}{} - } - } - result := []string{} - for containerID := range containerIds { - result = append(result, containerID) - } - return result -} - -func (builder *Builder) getBinds() []string { - var result []string - for _, mount := range builder.mounts { - if mount.containerID == "" { - result = append(result, mount.src+":"+mount.dest) - } - } - return result -} diff --git a/src/rocker/build2/plan.go b/src/rocker/build/plan.go similarity index 99% rename from src/rocker/build2/plan.go rename to src/rocker/build/plan.go index ca593759..dd2792fa 100644 --- a/src/rocker/build2/plan.go +++ b/src/rocker/build/plan.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import "strings" diff --git a/src/rocker/build2/plan_test.go b/src/rocker/build/plan_test.go similarity index 99% rename from src/rocker/build2/plan_test.go rename to src/rocker/build/plan_test.go index e2608f95..f008ff0d 100644 --- a/src/rocker/build2/plan_test.go +++ b/src/rocker/build/plan_test.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package build2 +package build import ( "testing" diff --git a/src/rocker/build/rockerfile.go b/src/rocker/build/rockerfile.go index b7fcd39b..d5041400 100644 --- a/src/rocker/build/rockerfile.go +++ b/src/rocker/build/rockerfile.go @@ -18,71 +18,76 @@ package build import ( "bytes" - "encoding/json" + "fmt" "io" - "strings" - + "io/ioutil" + "os" "rocker/parser" + "rocker/template" + "strings" ) -// Parse parses a Rockerfile from an io.Reader and returns AST data structure -func Parse(rockerfileContent io.Reader) (*parser.Node, error) { - node, err := parser.Parse(rockerfileContent) +type Rockerfile struct { + Name string + Source string + Content string + Vars template.Vars + Funs template.Funs + + rootNode *parser.Node +} + +func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { + fd, err := os.Open(name) if err != nil { return nil, err } + defer fd.Close() - return node, nil + return NewRockerfile(name, fd, vars, funs) } -// RockerfileAstToString returns printable AST of the node -func RockerfileAstToString(node *parser.Node) (str string, err error) { - str += node.Value +func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { + r = &Rockerfile{ + Name: name, + Vars: vars, + Funs: funs, + } - isKeyVal := node.Value == "env" || node.Value == "label" + var ( + source []byte + content *bytes.Buffer + ) - if len(node.Flags) > 0 { - str += " " + strings.Join(node.Flags, " ") + if source, err = ioutil.ReadAll(in); err != nil { + return nil, fmt.Errorf("Failed to read Rockerfile %s, error: %s", name, err) } - if node.Attributes["json"] { - args := []string{} - for n := node.Next; n != nil; n = n.Next { - args = append(args, n.Value) - } - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return str, err - } - str += " " + strings.TrimSpace(buf.String()) - return str, nil + r.Source = string(source) + + if content, err = template.Process(name, bytes.NewReader(source), vars, funs); err != nil { + return nil, err } - for _, n := range node.Children { - children, err := RockerfileAstToString(n) - if err != nil { - return str, err - } - str += children + "\n" + r.Content = content.String() + + // TODO: update parser from Docker + + if r.rootNode, err = parser.Parse(content); err != nil { + return nil, err } - if node.Next != nil { - for n, i := node.Next, 0; n != nil; n, i = n.Next, i+1 { - if len(n.Children) > 0 { - children, err := RockerfileAstToString(n) - if err != nil { - return str, err - } - str += " " + children - } else if isKeyVal && i%2 != 0 { - str += "=" + n.Value - } else { - str += " " + n.Value - } - } + return r, nil +} + +func (r *Rockerfile) Commands() []ConfigCommand { + commands := []ConfigCommand{} + + for i := 0; i < len(r.rootNode.Children); i++ { + commands = append(commands, parseCommand(r.rootNode.Children[i], false)) } - return strings.TrimSpace(str), nil + return commands } func handleJSONArgs(args []string, attributes map[string]bool) []string { @@ -98,6 +103,49 @@ func handleJSONArgs(args []string, attributes map[string]bool) []string { return []string{strings.Join(args, " ")} } +func parseCommand(node *parser.Node, isOnbuild bool) ConfigCommand { + cfg := ConfigCommand{ + name: node.Value, + attrs: node.Attributes, + original: node.Original, + args: []string{}, + flags: parseFlags(node.Flags), + isOnbuild: isOnbuild, + } + + // fill in args and substitute vars + for n := node.Next; n != nil; n = n.Next { + cfg.args = 
append(cfg.args, n.Value) + } + + return cfg +} + +func parseOnbuildCommands(onBuildTriggers []string) ([]ConfigCommand, error) { + commands := []ConfigCommand{} + + for _, step := range onBuildTriggers { + + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return commands, err + } + + for _, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return commands, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return commands, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + commands = append(commands, parseCommand(n, true)) + } + } + + return commands, nil +} + func parseFlags(flags []string) map[string]string { result := make(map[string]string) for _, flag := range flags { diff --git a/src/rocker/build/rockerfile_test.go b/src/rocker/build/rockerfile_test.go index 7778f875..517cf147 100644 --- a/src/rocker/build/rockerfile_test.go +++ b/src/rocker/build/rockerfile_test.go @@ -17,96 +17,61 @@ package build import ( - "io/ioutil" - "os" + "rocker/template" "strings" "testing" "github.com/stretchr/testify/assert" ) -func TestConfigParse(t *testing.T) { - t.Parallel() - - fd, err := os.Open("testdata/Rockerfile") - if err != nil { - t.Fatal(err) - } - - node, err := Parse(fd) - if err != nil { - t.Fatal(err) - } - - t.Logf("Node: %v", node.Dump()) - - expected, err := ioutil.ReadFile("testdata/Rockerfile_result") +func TestNewRockerfile_Base(t *testing.T) { + src := `FROM {{ .BaseImage }}` + vars := template.Vars{"BaseImage": "ubuntu"} + r, err := NewRockerfile("test", strings.NewReader(src), vars, template.Funs{}) if err != nil { t.Fatal(err) } - assert.Equal(t, string(expected), node.Dump()+"\n", "invalid AST parsed from Rockerfile") + assert.Equal(t, src, r.Source) + assert.Equal(t, "FROM ubuntu", r.Content) } -func TestConfigRockerfileAstToString_Base(t *testing.T) { - t.Parallel() - - fd, err := os.Open("testdata/Rockerfile") - if err != nil { - t.Fatal(err) - } - - node, err := Parse(fd) +func TestNewRockerfileFromFile(t *testing.T) { + r, err := NewRockerfileFromFile("testdata/Rockerfile", template.Vars{}, template.Funs{}) if err != nil { t.Fatal(err) } - str, err := RockerfileAstToString(node) - if err != nil { - t.Fatal(err) - } - t.Logf("Node String: %v", str) - - expected, err := ioutil.ReadFile("testdata/Rockerfile_string_result") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, string(expected), str+"\n", "invalid Rockerfile dumped to string") + assert.Equal(t, `from "some-java8-image-dev:1"`, r.rootNode.Children[0].Dump()) } -func TestConfigRockerfileAstToString_CmdJson(t *testing.T) { - t.Parallel() - - node, err := Parse(strings.NewReader("FROM scratch\nCMD [\"-\"]\n")) +func TestRockerfileCommands(t *testing.T) { + src := `FROM ubuntu` + r, err := NewRockerfile("test", strings.NewReader(src), template.Vars{}, template.Funs{}) if err != nil { t.Fatal(err) } - str, err := RockerfileAstToString(node) - if err != nil { - t.Fatal(err) - } - t.Logf("Node String: %v", str) - - assert.Equal(t, "from scratch\ncmd [\"-\"]", str, "invalid Rockerfile dumped to string") + commands := r.Commands() + assert.Len(t, commands, 1) + assert.Equal(t, "from", commands[0].name) + assert.Equal(t, "ubuntu", commands[0].args[0]) } -func TestConfigRockerfileAstToString_KeyVals(t *testing.T) { - t.Parallel() - - node, err := Parse(strings.NewReader("FROM scratch\nENV NAME=JOHN\\\n LASTNAME=DOE\nMOUNT a b c\nLABEL ASD QWE SDF")) - if err != nil { - t.Fatal(err) +func 
TestRockerfileParseOnbuildCommands(t *testing.T) { + triggers := []string{ + "RUN make", + "RUN make install", } - str, err := RockerfileAstToString(node) + commands, err := parseOnbuildCommands(triggers) if err != nil { t.Fatal(err) } - // t.Logf("Node String: %v", str) - // pretty.Println(node) - // t.Logf("Node: %v", node.Dump()) - assert.Equal(t, "from scratch\nenv NAME=JOHN LASTNAME=DOE\nmount a b c\nlabel ASD=QWE SDF", str, "invalid Rockerfile dumped to string") + assert.Len(t, commands, 2) + assert.Equal(t, "run", commands[0].name) + assert.Equal(t, []string{"make"}, commands[0].args) + assert.Equal(t, "run", commands[1].name) + assert.Equal(t, []string{"make install"}, commands[1].args) } diff --git a/src/rocker/build/semver.go b/src/rocker/build/semver.go deleted file mode 100644 index 18427b87..00000000 --- a/src/rocker/build/semver.go +++ /dev/null @@ -1,64 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Light semver implementation, we cannot use 'semver' package because -// it does not export 'version' property that we need here. - -package build - -import ( - "fmt" - "regexp" - "strconv" -) - -var semverRegexp = regexp.MustCompile(`^\bv?(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(?:-([\da-z\-]+(?:\.[\da-z\-]+)*))?\b$`) - -// Semver represents a light version of 'semver' data structure -type Semver struct { - Major int - Minor int - Patch int - Suffix string -} - -// NewSemver parses a semver string into the Semver struct -func NewSemver(str string) (semver *Semver, err error) { - matches := semverRegexp.FindAllStringSubmatch(str, -1) - if matches == nil { - return nil, fmt.Errorf("Failed to parse given version as semver: %s", str) - } - - semver = &Semver{} - - if semver.Major, err = strconv.Atoi(matches[0][1]); err != nil { - return nil, err - } - if semver.Minor, err = strconv.Atoi(matches[0][2]); err != nil { - return nil, err - } - if semver.Patch, err = strconv.Atoi(matches[0][3]); err != nil { - return nil, err - } - semver.Suffix = matches[0][4] - - return semver, nil -} - -// HasSuffix returns true if the suffix (such as `-build123`) is present for the version -func (semver *Semver) HasSuffix() bool { - return semver.Suffix != "" -} diff --git a/src/rocker/build2/state.go b/src/rocker/build/state.go similarity index 98% rename from src/rocker/build2/state.go rename to src/rocker/build/state.go index 1162a47a..dcd89a57 100644 --- a/src/rocker/build2/state.go +++ b/src/rocker/build/state.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "fmt" diff --git a/src/rocker/build2/tar.go b/src/rocker/build/tar.go similarity index 99% rename from src/rocker/build2/tar.go rename to src/rocker/build/tar.go index 22cf80a9..831340d6 100644 --- a/src/rocker/build2/tar.go +++ b/src/rocker/build/tar.go @@ -5,7 +5,7 @@ // // Copyright 2013-2015 Docker, Inc. 
-package build2 +package build import ( "archive/tar" diff --git a/src/rocker/build2/tar_unix.go b/src/rocker/build/tar_unix.go similarity index 98% rename from src/rocker/build2/tar_unix.go rename to src/rocker/build/tar_unix.go index ee64d392..d0d55e66 100644 --- a/src/rocker/build2/tar_unix.go +++ b/src/rocker/build/tar_unix.go @@ -7,7 +7,7 @@ // // Copyright 2013-2015 Docker, Inc. -package build2 +package build import ( "archive/tar" diff --git a/src/rocker/build2/tar_windows.go b/src/rocker/build/tar_windows.go similarity index 99% rename from src/rocker/build2/tar_windows.go rename to src/rocker/build/tar_windows.go index 8229b728..f2c29f37 100644 --- a/src/rocker/build2/tar_windows.go +++ b/src/rocker/build/tar_windows.go @@ -7,7 +7,7 @@ // // Copyright 2013-2015 Docker, Inc. -package build2 +package build import ( "archive/tar" diff --git a/src/rocker/build/testdata/Rockerfile_result b/src/rocker/build/testdata/Rockerfile_result deleted file mode 100644 index 03e89cf1..00000000 --- a/src/rocker/build/testdata/Rockerfile_result +++ /dev/null @@ -1,15 +0,0 @@ -(from "some-java8-image-dev:1") -(run "apt-get update && apt-get install -y nodejs npm && npm install -g bower && rm -rf /var/lib/apt/lists/*") -(run "echo \"{ \\\"allow_root\\\": true }\" > /root/.bowerrc") -(run "ln -sf /usr/bin/nodejs /usr/bin/node") -(add ["--user=john" "--ignore-mtime"] "." "/src") -(workdir "/src") -(onbuild (add "." "/")) -(mount "/root/.gradle") -(mount "$GIT_SSH_KEY:/root/.ssh/id_rsa") -(run "gradle --refresh-dependencies --stacktrace clean test") -(export "/src/corgi-app/build/distributions/app.tar") -(from "some-java8-image:1") -(import "app.tar" "/opt") -(cmd "/sbin/my_init" "/opt/app/bin/app") -(push "mycompany/app:$branch-$version") diff --git a/src/rocker/build/testdata/Rockerfile_string_result b/src/rocker/build/testdata/Rockerfile_string_result deleted file mode 100644 index badcb9e8..00000000 --- a/src/rocker/build/testdata/Rockerfile_string_result +++ /dev/null @@ -1,15 +0,0 @@ -from some-java8-image-dev:1 -run apt-get update && apt-get install -y nodejs npm && npm install -g bower && rm -rf /var/lib/apt/lists/* -run echo "{ \"allow_root\": true }" > /root/.bowerrc -run ln -sf /usr/bin/nodejs /usr/bin/node -add --user=john --ignore-mtime . /src -workdir /src -onbuild add [".","/"] -mount /root/.gradle -mount $GIT_SSH_KEY:/root/.ssh/id_rsa -run gradle --refresh-dependencies --stacktrace clean test -export /src/corgi-app/build/distributions/app.tar -from some-java8-image:1 -import app.tar /opt -cmd ["/sbin/my_init","/opt/app/bin/app"] -push mycompany/app:$branch-$version diff --git a/src/rocker/build/tty.go b/src/rocker/build/tty.go deleted file mode 100644 index f198fca4..00000000 --- a/src/rocker/build/tty.go +++ /dev/null @@ -1,70 +0,0 @@ -// This code is borrowed from Docker -// Licensed under the Apache License, Version 2.0; Copyright 2013-2015 Docker, Inc. 
See LICENSE.APACHE -// NOTICE: no changes has been made to these functions code - -package build - -import ( - "fmt" - "os" - gosignal "os/signal" - "runtime" - "time" - - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" -) - -func (builder *Builder) monitorTtySize(id string) error { - builder.resizeTty(id) - - if runtime.GOOS == "windows" { - go func() { - prevH, prevW := builder.getTtySize() - for { - time.Sleep(time.Millisecond * 250) - h, w := builder.getTtySize() - - if prevW != w || prevH != h { - builder.resizeTty(id) - } - prevH = h - prevW = w - } - }() - } else { - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for range sigchan { - builder.resizeTty(id) - } - }() - } - return nil -} - -func (builder *Builder) resizeTty(id string) { - height, width := builder.getTtySize() - if height == 0 && width == 0 { - return - } - - if err := builder.Docker.ResizeContainerTTY(id, height, width); err != nil { - fmt.Fprintf(builder.OutStream, "Failed to resize container TTY %s, error: %s\n", id, err) - } -} - -func (builder *Builder) getTtySize() (int, int) { - if !builder.isTerminalOut { - return 0, 0 - } - ws, err := term.GetWinsize(builder.fdOut) - if err != nil { - fmt.Fprintf(builder.OutStream, "Error getting TTY size: %s\n", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} diff --git a/src/rocker/build2/util.go b/src/rocker/build/util.go similarity index 99% rename from src/rocker/build2/util.go rename to src/rocker/build/util.go index 7bb052dd..d91b5e5f 100644 --- a/src/rocker/build2/util.go +++ b/src/rocker/build/util.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package build2 +package build import ( "crypto/md5" diff --git a/src/rocker/build2/commands.go b/src/rocker/build2/commands.go deleted file mode 100644 index 455d534e..00000000 --- a/src/rocker/build2/commands.go +++ /dev/null @@ -1,1221 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build2 - -import ( - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "rocker/util" - "sort" - "strings" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/nat" - "github.com/docker/docker/pkg/units" - "github.com/fsouza/go-dockerclient" -) - -const ( - COMMIT_SKIP = "COMMIT_SKIP" -) - -type ConfigCommand struct { - name string - args []string - attrs map[string]bool - flags map[string]string - original string - isOnbuild bool -} - -type Command interface { - // Execute does the command execution and returns modified state. - // Note that here we use State not by reference because we want - // it to be immutable. In future, it may encoded/decoded from json - // and passed to the external command implementations. 
- Execute(b *Build) (State, error) - - // Returns true if the command should be executed - ShouldRun(b *Build) (bool, error) - - // String returns the human readable string representation of the command - String() string -} - -func NewCommand(cfg ConfigCommand) (cmd Command, err error) { - // TODO: use reflection? - switch cfg.name { - case "from": - cmd = &CommandFrom{cfg} - case "maintainer": - cmd = &CommandMaintainer{cfg} - case "run": - cmd = &CommandRun{cfg} - case "attach": - cmd = &CommandAttach{cfg} - case "env": - cmd = &CommandEnv{cfg} - case "label": - cmd = &CommandLabel{cfg} - case "workdir": - cmd = &CommandWorkdir{cfg} - case "tag": - cmd = &CommandTag{cfg} - case "push": - cmd = &CommandPush{cfg} - case "copy": - cmd = &CommandCopy{cfg} - case "add": - cmd = &CommandAdd{cfg} - case "cmd": - cmd = &CommandCmd{cfg} - case "entrypoint": - cmd = &CommandEntrypoint{cfg} - case "expose": - cmd = &CommandExpose{cfg} - case "volume": - cmd = &CommandVolume{cfg} - case "user": - cmd = &CommandUser{cfg} - case "onbuild": - cmd = &CommandOnbuild{cfg} - case "mount": - cmd = &CommandMount{cfg} - case "export": - cmd = &CommandExport{cfg} - case "import": - cmd = &CommandImport{cfg} - default: - return nil, fmt.Errorf("Unknown command: %s", cfg.name) - } - - if cfg.isOnbuild { - cmd = &CommandOnbuildWrap{cmd} - } - - return cmd, nil -} - -// CommandFrom implements FROM -type CommandFrom struct { - cfg ConfigCommand -} - -func (c *CommandFrom) String() string { - return c.cfg.original -} - -func (c *CommandFrom) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandFrom) Execute(b *Build) (s State, err error) { - // TODO: for "scratch" image we may use /images/create - - if len(c.cfg.args) != 1 { - return s, fmt.Errorf("FROM requires one argument") - } - - var ( - img *docker.Image - name = c.cfg.args[0] - ) - - if name == "scratch" { - s.NoBaseImage = true - return s, nil - } - - // If Pull is true, then img will remain nil and it will be pulled below - if !b.cfg.Pull { - if img, err = b.client.InspectImage(name); err != nil { - return s, err - } - } - - if img == nil { - if err = b.client.PullImage(name); err != nil { - return s, err - } - if img, err = b.client.InspectImage(name); err != nil { - return s, err - } - if img == nil { - return s, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) - } - } - - // We want to say the size of the FROM image. Better to do it - // from the client, but don't know how to do it better, - // without duplicating InspectImage calls and making unnecessary functions - - log.WithFields(log.Fields{ - "size": units.HumanSize(float64(img.VirtualSize)), - }).Infof("| Image %.12s", img.ID) - - s = b.state - s.ImageID = img.ID - s.Config = *img.Config - - b.ProducedSize = 0 - b.VirtualSize = img.VirtualSize - - // If we don't have OnBuild triggers, then we are done - if len(s.Config.OnBuild) == 0 { - return s, nil - } - - log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild)) - - // Remove them from the config, since the config will be committed. 
- s.InjectCommands = s.Config.OnBuild - s.Config.OnBuild = []string{} - - return s, nil -} - -// CommandMaintainer implements CMD -type CommandMaintainer struct { - cfg ConfigCommand -} - -func (c *CommandMaintainer) String() string { - return c.cfg.original -} - -func (c *CommandMaintainer) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandMaintainer) Execute(b *Build) (State, error) { - if len(c.cfg.args) != 1 { - return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") - } - - // Don't see any sense of doing a commit here, as Docker does - - return b.state, nil -} - -// CommandReset cleans the builder state before the next FROM -type CommandCleanup struct { - final bool - tagged bool -} - -func (c *CommandCleanup) String() string { - return "Cleaning up" -} - -func (c *CommandCleanup) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandCleanup) Execute(b *Build) (State, error) { - s := b.state - - if b.cfg.NoGarbage && !c.tagged && s.ImageID != "" && s.ProducedImage { - if err := b.client.RemoveImage(s.ImageID); err != nil { - return s, err - } - } - - // Cleanup state - dirtyState := s - s = NewState(b) - - // Keep some stuff between froms - s.ExportsID = dirtyState.ExportsID - - // For final cleanup we want to keep imageID - if c.final { - s.ImageID = dirtyState.ImageID - } else { - log.Infof("====================================") - } - - return s, nil -} - -// CommandCommit commits collected changes -type CommandCommit struct{} - -func (c *CommandCommit) String() string { - return "Commit changes" -} - -func (c *CommandCommit) ShouldRun(b *Build) (bool, error) { - return b.state.GetCommits() != "", nil -} - -func (c *CommandCommit) Execute(b *Build) (s State, err error) { - s = b.state - - commits := s.GetCommits() - if commits == "" { - return s, nil - } - - if s.ImageID == "" && !s.NoBaseImage { - return s, fmt.Errorf("Please provide a source image with `from` prior to commit") - } - - // TODO: ? 
- // if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") - - // TODO: verify that we need to check cache in commit only for - // a non-container actions - - if s.ContainerID == "" { - - // Check cache - var hit bool - s, hit, err = b.probeCache(s) - if err != nil { - return s, err - } - if hit { - return s, nil - } - - origCmd := s.Config.Cmd - s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} - - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { - return s, err - } - - s.Config.Cmd = origCmd - } - - defer func(id string) { - s.Commits = []string{} - if err = b.client.RemoveContainer(id); err != nil { - log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) - } - }(s.ContainerID) - - var img *docker.Image - if img, err = b.client.CommitContainer(s, commits); err != nil { - return s, err - } - - s.ContainerID = "" - s.ParentID = s.ImageID - s.ImageID = img.ID - s.ProducedImage = true - - if b.cache != nil { - b.cache.Put(s) - } - - // Store some stuff to the build - b.ProducedSize += img.Size - b.VirtualSize = img.VirtualSize - - return s, nil -} - -// CommandRun implements RUN -type CommandRun struct { - cfg ConfigCommand -} - -func (c *CommandRun) String() string { - return c.cfg.original -} - -func (c *CommandRun) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandRun) Execute(b *Build) (s State, err error) { - s = b.state - - if s.ImageID == "" && !s.NoBaseImage { - return s, fmt.Errorf("Please provide a source image with `FROM` prior to run") - } - - cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) - - if !c.cfg.attrs["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) - } - - s.Commit("RUN %q", cmd) - - // Check cache - s, hit, err := b.probeCache(s) - if err != nil { - return s, err - } - if hit { - return s, nil - } - - // TODO: test with ENTRYPOINT - - // We run this command in the container using CMD - origCmd := s.Config.Cmd - s.Config.Cmd = cmd - - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { - return s, err - } - - if err = b.client.RunContainer(s.ContainerID, false); err != nil { - b.client.RemoveContainer(s.ContainerID) - return s, err - } - - // Restore command after commit - s.Config.Cmd = origCmd - - return s, nil -} - -// CommandAttach implements ATTACH -type CommandAttach struct { - cfg ConfigCommand -} - -func (c *CommandAttach) String() string { - return c.cfg.original -} - -func (c *CommandAttach) ShouldRun(b *Build) (bool, error) { - // TODO: skip attach? - return true, nil -} - -func (c *CommandAttach) Execute(b *Build) (s State, err error) { - s = b.state - - // simply ignore this command if we don't wanna attach - if !b.cfg.Attach { - log.Infof("Skip ATTACH; use --attach option to get inside") - // s.SkipCommit() - return s, nil - } - - if s.ImageID == "" && !s.NoBaseImage { - return s, fmt.Errorf("Please provide a source image with `FROM` prior to ATTACH") - } - - cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) - - if len(cmd) == 0 { - cmd = []string{"/bin/sh"} - } else if !c.cfg.attrs["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) 
- } - - // TODO: do s.commit unique - - // We run this command in the container using CMD - - // Backup the config so we can restore it later - origState := s - defer func() { - s = origState - }() - - s.Config.Cmd = cmd - s.Config.Entrypoint = []string{} - s.Config.Tty = true - s.Config.OpenStdin = true - s.Config.StdinOnce = true - s.Config.AttachStdin = true - s.Config.AttachStderr = true - s.Config.AttachStdout = true - - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { - return s, err - } - - if err = b.client.RunContainer(s.ContainerID, true); err != nil { - b.client.RemoveContainer(s.ContainerID) - return s, err - } - - return s, nil -} - -// CommandEnv implements ENV -type CommandEnv struct { - cfg ConfigCommand -} - -func (c *CommandEnv) String() string { - return c.cfg.original -} - -func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandEnv) Execute(b *Build) (s State, err error) { - - s = b.state - args := c.cfg.args - - if len(args) == 0 { - return s, fmt.Errorf("ENV requires at least one argument") - } - - if len(args)%2 != 0 { - // should never get here, but just in case - return s, fmt.Errorf("Bad input to ENV, too many args") - } - - commitStr := "ENV" - - for j := 0; j < len(args); j += 2 { - // name ==> args[j] - // value ==> args[j+1] - newVar := strings.Join(args[j:j+2], "=") - commitStr += " " + newVar - - gotOne := false - for i, envVar := range s.Config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if envParts[0] == args[j] { - s.Config.Env[i] = newVar - gotOne = true - break - } - } - if !gotOne { - s.Config.Env = append(s.Config.Env, newVar) - } - } - - s.Commit(commitStr) - - return s, nil -} - -// CommandLabel implements LABEL -type CommandLabel struct { - cfg ConfigCommand -} - -func (c *CommandLabel) String() string { - return c.cfg.original -} - -func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandLabel) Execute(b *Build) (s State, err error) { - - s = b.state - args := c.cfg.args - - if len(args) == 0 { - return s, fmt.Errorf("LABEL requires at least one argument") - } - - if len(args)%2 != 0 { - // should never get here, but just in case - return s, fmt.Errorf("Bad input to LABEL, too many args") - } - - commitStr := "LABEL" - - if s.Config.Labels == nil { - s.Config.Labels = map[string]string{} - } - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - s.Config.Labels[args[j]] = args[j+1] - j++ - } - - s.Commit(commitStr) - - return s, nil -} - -// CommandWorkdir implements WORKDIR -type CommandWorkdir struct { - cfg ConfigCommand -} - -func (c *CommandWorkdir) String() string { - return c.cfg.original -} - -func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) != 1 { - return s, fmt.Errorf("WORKDIR requires exactly one argument") - } - - workdir := c.cfg.args[0] - - if !filepath.IsAbs(workdir) { - current := s.Config.WorkingDir - workdir = filepath.Join("/", current, workdir) - } - - s.Config.WorkingDir = workdir - - s.Commit(fmt.Sprintf("WORKDIR %v", workdir)) - - return s, nil -} - -// CommandCmd implements CMD -type CommandCmd struct { - cfg ConfigCommand -} - -func (c *CommandCmd) String() string { - return c.cfg.original -} - -func (c *CommandCmd) ShouldRun(b *Build) (bool, error) { - return true, nil 
-} - -func (c *CommandCmd) Execute(b *Build) (s State, err error) { - s = b.state - - cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs) - - if !c.cfg.attrs["json"] { - cmd = append([]string{"/bin/sh", "-c"}, cmd...) - } - - s.Config.Cmd = cmd - - s.Commit(fmt.Sprintf("CMD %q", cmd)) - - if len(c.cfg.args) != 0 { - s.CmdSet = true - } - - return s, nil -} - -// CommandEntrypoint implements ENTRYPOINT -type CommandEntrypoint struct { - cfg ConfigCommand -} - -func (c *CommandEntrypoint) String() string { - return c.cfg.original -} - -func (c *CommandEntrypoint) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { - s = b.state - - parsed := handleJSONArgs(c.cfg.args, c.cfg.attrs) - - switch { - case c.cfg.attrs["json"]: - // ENTRYPOINT ["echo", "hi"] - s.Config.Entrypoint = parsed - case len(parsed) == 0: - // ENTRYPOINT [] - s.Config.Entrypoint = nil - default: - // ENTRYPOINT echo hi - s.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} - } - - s.Commit(fmt.Sprintf("ENTRYPOINT %q", s.Config.Entrypoint)) - - // TODO: test this - // when setting the entrypoint if a CMD was not explicitly set then - // set the command to nil - if !s.CmdSet { - s.Config.Cmd = nil - } - - return s, nil -} - -// CommandExpose implements EXPOSE -type CommandExpose struct { - cfg ConfigCommand -} - -func (c *CommandExpose) String() string { - return c.cfg.original -} - -func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandExpose) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) == 0 { - return s, fmt.Errorf("EXPOSE requires at least one argument") - } - - if s.Config.ExposedPorts == nil { - s.Config.ExposedPorts = map[docker.Port]struct{}{} - } - - ports, _, err := nat.ParsePortSpecs(c.cfg.args) - if err != nil { - return s, err - } - - // instead of using ports directly, we build a list of ports and sort it so - // the order is consistent. 
This prevents cache burst where map ordering - // changes between builds - portList := make([]string, len(ports)) - var i int - for port := range ports { - dockerPort := docker.Port(port) - if _, exists := s.Config.ExposedPorts[dockerPort]; !exists { - s.Config.ExposedPorts[dockerPort] = struct{}{} - } - portList[i] = string(port) - i++ - } - sort.Strings(portList) - - message := fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")) - s.Commit(message) - - return s, nil -} - -// CommandVolume implements VOLUME -type CommandVolume struct { - cfg ConfigCommand -} - -func (c *CommandVolume) String() string { - return c.cfg.original -} - -func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandVolume) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) == 0 { - return s, fmt.Errorf("VOLUME requires at least one argument") - } - - if s.Config.Volumes == nil { - s.Config.Volumes = map[string]struct{}{} - } - for _, v := range c.cfg.args { - v = strings.TrimSpace(v) - if v == "" { - return s, fmt.Errorf("Volume specified can not be an empty string") - } - s.Config.Volumes[v] = struct{}{} - } - - s.Commit(fmt.Sprintf("VOLUME %v", c.cfg.args)) - - return s, nil -} - -// CommandUser implements USER -type CommandUser struct { - cfg ConfigCommand -} - -func (c *CommandUser) String() string { - return c.cfg.original -} - -func (c *CommandUser) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandUser) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) != 1 { - return s, fmt.Errorf("USER requires exactly one argument") - } - - s.Config.User = c.cfg.args[0] - - s.Commit(fmt.Sprintf("USER %v", c.cfg.args)) - - return s, nil -} - -// CommandOnbuild implements ONBUILD -type CommandOnbuild struct { - cfg ConfigCommand -} - -func (c *CommandOnbuild) String() string { - return c.cfg.original -} - -func (c *CommandOnbuild) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) == 0 { - return s, fmt.Errorf("ONBUILD requires at least one argument") - } - - command := strings.ToUpper(strings.TrimSpace(c.cfg.args[0])) - switch command { - case "ONBUILD": - return s, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return s, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", command) - } - - orig := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(c.cfg.original, "") - - s.Config.OnBuild = append(s.Config.OnBuild, orig) - s.Commit(fmt.Sprintf("ONBUILD %s", orig)) - - return s, nil -} - -// CommandTag implements TAG -type CommandTag struct { - cfg ConfigCommand -} - -func (c *CommandTag) String() string { - return c.cfg.original -} - -func (c *CommandTag) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandTag) Execute(b *Build) (State, error) { - if len(c.cfg.args) != 1 { - return b.state, fmt.Errorf("TAG requires exactly one argument") - } - - if b.state.ImageID == "" { - return b.state, fmt.Errorf("Cannot TAG on empty image") - } - - if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { - return b.state, err - } - - return b.state, nil -} - -// CommandPush implements PUSH -type CommandPush struct { - cfg ConfigCommand -} - -func (c *CommandPush) String() string { - return c.cfg.original -} - -func (c *CommandPush) ShouldRun(b *Build) (bool, error) { - return true, nil -} - 
-func (c *CommandPush) Execute(b *Build) (State, error) { - if len(c.cfg.args) != 1 { - return b.state, fmt.Errorf("PUSH requires exactly one argument") - } - - if b.state.ImageID == "" { - return b.state, fmt.Errorf("Cannot PUSH empty image") - } - - if err := b.client.TagImage(b.state.ImageID, c.cfg.args[0]); err != nil { - return b.state, err - } - - if !b.cfg.Push { - log.Infof("| Don't push. Pass --push flag to actually push to the registry") - return b.state, nil - } - - if err := b.client.PushImage(c.cfg.args[0]); err != nil { - return b.state, err - } - - return b.state, nil -} - -// CommandCopy implements COPY -type CommandCopy struct { - cfg ConfigCommand -} - -func (c *CommandCopy) String() string { - return c.cfg.original -} - -func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandCopy) Execute(b *Build) (State, error) { - if len(c.cfg.args) < 2 { - return b.state, fmt.Errorf("COPY requires at least two arguments") - } - return copyFiles(b, c.cfg.args, "COPY") -} - -// CommandAdd implements ADD -// For now it is an alias of COPY, but later will add urls and archives to it -type CommandAdd struct { - cfg ConfigCommand -} - -func (c *CommandAdd) String() string { - return c.cfg.original -} - -func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandAdd) Execute(b *Build) (State, error) { - if len(c.cfg.args) < 2 { - return b.state, fmt.Errorf("ADD requires at least two arguments") - } - return copyFiles(b, c.cfg.args, "ADD") -} - -// CommandMount implements MOUNT -type CommandMount struct { - cfg ConfigCommand -} - -func (c *CommandMount) String() string { - return c.cfg.original -} - -func (c *CommandMount) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandMount) Execute(b *Build) (s State, err error) { - - s = b.state - - if len(c.cfg.args) == 0 { - return b.state, fmt.Errorf("MOUNT requires at least one argument") - } - - commitIds := []string{} - - for _, arg := range c.cfg.args { - - switch strings.Contains(arg, ":") { - // MOUNT src:dest - case true: - var ( - pair = strings.SplitN(arg, ":", 2) - src = pair[0] - dest = pair[1] - err error - ) - - // Process relative paths in volumes - if strings.HasPrefix(src, "~") { - src = strings.Replace(src, "~", os.Getenv("HOME"), 1) - } - if !path.IsAbs(src) { - src = path.Join(b.cfg.ContextDir, src) - } - - if src, err = b.client.ResolveHostPath(src); err != nil { - return s, err - } - - if s.HostConfig.Binds == nil { - s.HostConfig.Binds = []string{} - } - - s.HostConfig.Binds = append(s.HostConfig.Binds, src+":"+dest) - commitIds = append(commitIds, arg) - - // MOUNT dir - case false: - name, err := b.getVolumeContainer(arg) - if err != nil { - return s, err - } - - if s.HostConfig.VolumesFrom == nil { - s.HostConfig.VolumesFrom = []string{} - } - - s.HostConfig.VolumesFrom = append(s.HostConfig.VolumesFrom, name) - commitIds = append(commitIds, name+":"+arg) - } - } - - s.Commit(fmt.Sprintf("MOUNT %q", commitIds)) - - return s, nil -} - -// CommandExport implements EXPORT -type CommandExport struct { - cfg ConfigCommand -} - -func (c *CommandExport) String() string { - return c.cfg.original -} - -func (c *CommandExport) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandExport) Execute(b *Build) (s State, err error) { - - s = b.state - args := c.cfg.args - - if len(args) == 0 { - return s, fmt.Errorf("EXPORT requires at least one argument") - } - - // If only one argument was given to EXPORT, 
use basename of a file - // EXPORT /my/dir/file.tar --> /EXPORT_VOLUME/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} - } - - src := args[0 : len(args)-1] - dest := args[len(args)-1] // last one is always the dest - - // EXPORT /my/dir my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir /my_dir --> /EXPORT_VOLUME/my_dir - // EXPORT /my/dir stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir /stuff/ --> /EXPORT_VOLUME/stuff/my_dir - // EXPORT /my/dir/* / --> /EXPORT_VOLUME/stuff/my_dir - - exportsContainerID, err := b.getExportsContainer() - if err != nil { - return s, err - } - - // build the command - cmdDestPath, err := util.ResolvePath(ExportsPath, dest) - if err != nil { - return s, fmt.Errorf("Invalid EXPORT destination: %s", dest) - } - - s.Commit("EXPORT %q to %.12s:%s", src, exportsContainerID, dest) - - s, hit, err := b.probeCache(s) - if err != nil { - return s, err - } - if hit { - b.exports = append(b.exports, s.ExportsID) - return s, nil - } - - // Remember original stuff so we can restore it when we finished - var exportsId string - origState := s - - defer func() { - s = origState - s.ExportsID = exportsId - b.exports = append(b.exports, exportsId) - }() - - // Append exports container as a volume - s.HostConfig.VolumesFrom = []string{exportsContainerID} - - cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} - - if b.cfg.Verbose { - cmd = append(cmd, "--verbose") - } - - cmd = append(cmd, src...) - cmd = append(cmd, cmdDestPath) - - s.Config.Cmd = cmd - s.Config.Entrypoint = []string{} - - if exportsId, err = b.client.CreateContainer(s); err != nil { - return s, err - } - defer b.client.RemoveContainer(exportsId) - - log.Infof("| Running in %.12s: %s", exportsId, strings.Join(cmd, " ")) - - if err = b.client.RunContainer(exportsId, false); err != nil { - return s, err - } - - return s, nil -} - -// CommandImport implements IMPORT -type CommandImport struct { - cfg ConfigCommand -} - -func (c *CommandImport) String() string { - return c.cfg.original -} - -func (c *CommandImport) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandImport) Execute(b *Build) (s State, err error) { - s = b.state - args := c.cfg.args - - if len(args) == 0 { - return s, fmt.Errorf("IMPORT requires at least one argument") - } - if len(b.exports) == 0 { - return s, fmt.Errorf("You have to EXPORT something first in order to IMPORT") - } - - // TODO: EXPORT and IMPORT cache is not invalidated properly in between - // different tracks of the same build. The EXPORT may be cached - // because it was built earlier with the same prerequisites, but the actual - // data in the exports container may be from the latest EXPORT of different - // build. So we need to prefix ~/.rocker_exports dir with some id somehow. 
- - log.Infof("| Import from %s", b.exportsContainerName()) - - // If only one argument was given to IMPORT, use the same path for destination - // IMPORT /my/dir/file.tar --> ADD ./EXPORT_VOLUME/my/dir/file.tar /my/dir/file.tar - if len(args) < 2 { - args = []string{args[0], "/"} - } - dest := args[len(args)-1] // last one is always the dest - src := []string{} - - for _, arg := range args[0 : len(args)-1] { - argResolved, err := util.ResolvePath(ExportsPath, arg) - if err != nil { - return s, fmt.Errorf("Invalid IMPORT source: %s", arg) - } - src = append(src, argResolved) - } - - sort.Strings(b.exports) - s.Commit("IMPORT %q : %q %s", b.exports, src, dest) - - // Check cache - s, hit, err := b.probeCache(s) - if err != nil { - return s, err - } - if hit { - return s, nil - } - - // Remember original stuff so we can restore it when we finished - origState := s - - var importID string - - defer func() { - s = origState - s.ContainerID = importID - }() - - cmd := []string{"/opt/rsync/bin/rsync", "-a"} - - if b.cfg.Verbose { - cmd = append(cmd, "--verbose") - } - - cmd = append(cmd, src...) - cmd = append(cmd, dest) - - s.Config.Cmd = cmd - s.Config.Entrypoint = []string{} - s.HostConfig.VolumesFrom = []string{b.exportsContainerName()} - - if importID, err = b.client.CreateContainer(s); err != nil { - return s, err - } - - log.Infof("| Running in %.12s: %s", importID, strings.Join(cmd, " ")) - - if err = b.client.RunContainer(importID, false); err != nil { - return s, err - } - - // TODO: if b.exportsCacheBusted and IMPORT cache was invalidated, - // CommitCommand then caches it anyway. - - return s, nil -} - -// CommandOnbuildWrap wraps ONBUILD command -type CommandOnbuildWrap struct { - cmd Command -} - -func (c *CommandOnbuildWrap) String() string { - return "ONBUILD " + c.cmd.String() -} - -func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { - return true, nil -} - -func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { - return c.cmd.Execute(b) -} diff --git a/src/rocker/build2/rockerfile.go b/src/rocker/build2/rockerfile.go deleted file mode 100644 index 041d1680..00000000 --- a/src/rocker/build2/rockerfile.go +++ /dev/null @@ -1,164 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package build2 - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "rocker/parser" - "rocker/template" - "strings" -) - -type Rockerfile struct { - Name string - Source string - Content string - Vars template.Vars - Funs template.Funs - - rootNode *parser.Node -} - -func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { - fd, err := os.Open(name) - if err != nil { - return nil, err - } - defer fd.Close() - - return NewRockerfile(name, fd, vars, funs) -} - -func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { - r = &Rockerfile{ - Name: name, - Vars: vars, - Funs: funs, - } - - var ( - source []byte - content *bytes.Buffer - ) - - if source, err = ioutil.ReadAll(in); err != nil { - return nil, fmt.Errorf("Failed to read Rockerfile %s, error: %s", name, err) - } - - r.Source = string(source) - - if content, err = template.Process(name, bytes.NewReader(source), vars, funs); err != nil { - return nil, err - } - - r.Content = content.String() - - // TODO: update parser from Docker - - if r.rootNode, err = parser.Parse(content); err != nil { - return nil, err - } - - return r, nil -} - -func (r *Rockerfile) Commands() []ConfigCommand { - commands := []ConfigCommand{} - - for i := 0; i < len(r.rootNode.Children); i++ { - commands = append(commands, parseCommand(r.rootNode.Children[i], false)) - } - - return commands -} - -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} - -func parseCommand(node *parser.Node, isOnbuild bool) ConfigCommand { - cfg := ConfigCommand{ - name: node.Value, - attrs: node.Attributes, - original: node.Original, - args: []string{}, - flags: parseFlags(node.Flags), - isOnbuild: isOnbuild, - } - - // fill in args and substitute vars - for n := node.Next; n != nil; n = n.Next { - cfg.args = append(cfg.args, n.Value) - } - - return cfg -} - -func parseOnbuildCommands(onBuildTriggers []string) ([]ConfigCommand, error) { - commands := []ConfigCommand{} - - for _, step := range onBuildTriggers { - - ast, err := parser.Parse(strings.NewReader(step)) - if err != nil { - return commands, err - } - - for _, n := range ast.Children { - switch strings.ToUpper(n.Value) { - case "ONBUILD": - return commands, fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return commands, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) - } - - commands = append(commands, parseCommand(n, true)) - } - } - - return commands, nil -} - -func parseFlags(flags []string) map[string]string { - result := make(map[string]string) - for _, flag := range flags { - key := flag[2:] - value := "" - - index := strings.Index(key, "=") - if index >= 0 { - value = key[index+1:] - key = key[:index] - } - - result[key] = value - } - return result -} diff --git a/src/rocker/build2/rockerfile_test.go b/src/rocker/build2/rockerfile_test.go deleted file mode 100644 index 700f8a42..00000000 --- a/src/rocker/build2/rockerfile_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/*- - * Copyright 2015 Grammarly, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package build2 - -import ( - "rocker/template" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewRockerfile_Base(t *testing.T) { - src := `FROM {{ .BaseImage }}` - vars := template.Vars{"BaseImage": "ubuntu"} - r, err := NewRockerfile("test", strings.NewReader(src), vars, template.Funs{}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, src, r.Source) - assert.Equal(t, "FROM ubuntu", r.Content) -} - -func TestNewRockerfileFromFile(t *testing.T) { - r, err := NewRockerfileFromFile("testdata/Rockerfile", template.Vars{}, template.Funs{}) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, `from "some-java8-image-dev:1"`, r.rootNode.Children[0].Dump()) -} - -func TestRockerfileCommands(t *testing.T) { - src := `FROM ubuntu` - r, err := NewRockerfile("test", strings.NewReader(src), template.Vars{}, template.Funs{}) - if err != nil { - t.Fatal(err) - } - - commands := r.Commands() - assert.Len(t, commands, 1) - assert.Equal(t, "from", commands[0].name) - assert.Equal(t, "ubuntu", commands[0].args[0]) -} - -func TestRockerfileParseOnbuildCommands(t *testing.T) { - triggers := []string{ - "RUN make", - "RUN make install", - } - - commands, err := parseOnbuildCommands(triggers) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, commands, 2) - assert.Equal(t, "run", commands[0].name) - assert.Equal(t, []string{"make"}, commands[0].args) - assert.Equal(t, "run", commands[1].name) - assert.Equal(t, []string{"make install"}, commands[1].args) -} diff --git a/src/rocker/build2/testdata/Rockerfile b/src/rocker/build2/testdata/Rockerfile deleted file mode 100644 index 7cf5c72e..00000000 --- a/src/rocker/build2/testdata/Rockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM some-java8-image-dev:1 - -# Install nodejs, npm and bower -RUN \ - apt-get update && \ - apt-get install -y nodejs npm && \ - npm install -g bower && \ - rm -rf /var/lib/apt/lists/* - -RUN echo "{ \"allow_root\": true }" > /root/.bowerrc - -RUN ln -sf /usr/bin/nodejs /usr/bin/node - -ADD --user=john --ignore-mtime . 
/src -WORKDIR /src - -ONBUILD ADD [".", "/"] - -MOUNT /root/.gradle -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa - -RUN gradle --refresh-dependencies --stacktrace clean test - -EXPORT /src/corgi-app/build/distributions/app.tar - -#=== - -FROM some-java8-image:1 - -IMPORT app.tar /opt - -CMD ["/sbin/my_init", "/opt/app/bin/app"] - -PUSH mycompany/app:$branch-$version From 7002fd1b7dada668313bb87601e8bb255825c56a Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 30 Sep 2015 10:13:37 +0300 Subject: [PATCH 082/131] lint & vet --- Makefile | 4 +- src/rocker/build/build.go | 19 +++++-- src/rocker/build/cache.go | 6 +++ src/rocker/build/client.go | 18 +++++++ src/rocker/build/commands.go | 92 +++++++++++++++++++++++++++----- src/rocker/build/dockerignore.go | 6 ++- src/rocker/build/plan.go | 2 + src/rocker/build/rockerfile.go | 4 ++ src/rocker/build/state.go | 7 +++ src/rocker/build/tar_windows.go | 2 +- src/rocker/template/template.go | 1 + 11 files changed, 140 insertions(+), 21 deletions(-) diff --git a/Makefile b/Makefile index 29dc14a1..f1093c3d 100644 --- a/Makefile +++ b/Makefile @@ -36,12 +36,10 @@ UPLOAD_CMD = $(GITHUB_RELEASE) upload \ SRCS = $(shell find . -name '*.go' | grep -v '^./vendor/') PKGS := $(foreach pkg, $(sort $(dir $(SRCS))), $(pkg)) -GOPATH = $(shell pwd):$(shell pwd)/vendor - TESTARGS ?= binary: - GOPATH=$(GOPATH) go build \ + GOPATH=$(shell pwd):$(shell pwd)/vendor go build \ -ldflags "-X main.Version=$(VERSION) -X main.GitCommit=$(GITCOMMIT) -X main.GitBranch=$(GITBRANCH) -X main.BuildTime=$(BUILDTIME)" \ -v -o bin/rocker src/cmd/rocker/main.go diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index 7827f0b3..456fe644 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -30,12 +30,20 @@ import ( ) var ( + // NoBaseImageSpecifier defines the empty image name, used in the FROM instruction NoBaseImageSpecifier = "scratch" - MountVolumeImage = "grammarly/scratch:latest" - RsyncImage = "grammarly/rsync-static:1" - ExportsPath = "/.rocker_exports" + + // MountVolumeImage used for MOUNT volume containers + MountVolumeImage = "grammarly/scratch:latest" + + // RsyncImage used for EXPORT volume containers + RsyncImage = "grammarly/rsync-static:1" + + // ExportsPath is the path within EXPORT volume containers + ExportsPath = "/.rocker_exports" ) +// Config used specify parameters for the builder in New() type Config struct { OutStream io.Writer InStream io.ReadCloser @@ -50,6 +58,7 @@ type Config struct { Push bool } +// Build is the main object that processes build type Build struct { ProducedSize int64 VirtualSize int64 @@ -65,6 +74,7 @@ type Build struct { exports []string } +// New creates the new build object func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build { b := &Build{ rockerfile: rockerfile, @@ -77,6 +87,7 @@ func New(client Client, rockerfile *Rockerfile, cache Cache, cfg Config) *Build return b } +// Run runs the build following the given Plan func (b *Build) Run(plan Plan) (err error) { for k := 0; k < len(plan); k++ { @@ -123,10 +134,12 @@ func (b *Build) Run(plan Plan) (err error) { return nil } +// GetState returns current build state object func (b *Build) GetState() State { return b.state } +// GetImageID returns last image ID produced by the build func (b *Build) GetImageID() string { return b.state.ImageID } diff --git a/src/rocker/build/cache.go b/src/rocker/build/cache.go index e20fe47d..0bf15f3d 100644 --- a/src/rocker/build/cache.go +++ b/src/rocker/build/cache.go @@ -25,22 +25,26 @@ 
import ( log "github.com/Sirupsen/logrus" ) +// Cache interface describes a cache backend type Cache interface { Get(s State) (s2 *State, err error) Put(s State) error Del(s State) error } +// CacheFS implements file based cache backend type CacheFS struct { root string } +// NewCacheFS creates a file based cache backend func NewCacheFS(root string) *CacheFS { return &CacheFS{ root: root, } } +// Get fetches cache func (c *CacheFS) Get(s State) (res *State, err error) { match := filepath.Join(c.root, s.ImageID) @@ -77,6 +81,7 @@ func (c *CacheFS) Get(s State) (res *State, err error) { return } +// Put stores cache func (c *CacheFS) Put(s State) error { log.Debugf("CACHE PUT %s %s %q", s.ParentID, s.ImageID, s.Commits) @@ -91,6 +96,7 @@ func (c *CacheFS) Put(s State) error { return ioutil.WriteFile(fileName, data, 0644) } +// Del deletes cache func (c *CacheFS) Del(s State) error { log.Debugf("CACHE DELETE %s %s %q", s.ParentID, s.ImageID, s.Commits) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 15219210..ebcd4758 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -35,6 +35,7 @@ import ( log "github.com/Sirupsen/logrus" ) +// Client interface type Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error @@ -51,11 +52,13 @@ type Client interface { ResolveHostPath(path string) (resultPath string, err error) } +// DockerClient implements the client that works with a docker socket type DockerClient struct { client *docker.Client auth docker.AuthConfiguration } +// NewDockerClient makes a new client that works with a docker socket func NewDockerClient(dockerClient *docker.Client, auth docker.AuthConfiguration) *DockerClient { return &DockerClient{ client: dockerClient, @@ -63,6 +66,8 @@ func NewDockerClient(dockerClient *docker.Client, auth docker.AuthConfiguration) } } +// InspectImage inspects docker image +// it does not give an error when image not found, but returns nil instead func (c *DockerClient) InspectImage(name string) (*docker.Image, error) { img, err := c.client.InspectImage(name) // We simply return nil in case image not found @@ -72,6 +77,7 @@ func (c *DockerClient) InspectImage(name string) (*docker.Image, error) { return img, err } +// PullImage pulls docker image func (c *DockerClient) PullImage(name string) error { var ( @@ -109,6 +115,7 @@ func (c *DockerClient) PullImage(name string) error { return <-errch } +// RemoveImage removes docker image func (c *DockerClient) RemoveImage(imageID string) error { log.Infof("| Remove image %.12s", imageID) @@ -119,6 +126,7 @@ func (c *DockerClient) RemoveImage(imageID string) error { return c.client.RemoveImageExtended(imageID, opts) } +// CreateContainer creates docker container func (c *DockerClient) CreateContainer(s State) (string, error) { s.Config.Image = s.ImageID @@ -147,6 +155,7 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { return container.ID, nil } +// RunContainer runs docker container and optionally attaches stdin func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error { var ( @@ -279,6 +288,7 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error return nil } +// CommitContainer commits docker container func (c *DockerClient) CommitContainer(s State, message string) (*docker.Image, error) { commitOpts := docker.CommitContainerOptions{ Container: s.ContainerID, @@ -312,6 +322,7 @@ func (c *DockerClient) CommitContainer(s State, message string) 
(*docker.Image, return image, nil } +// RemoveContainer removes docker container func (c *DockerClient) RemoveContainer(containerID string) error { log.Infof("| Removing container %.12s", containerID) @@ -324,6 +335,7 @@ func (c *DockerClient) RemoveContainer(containerID string) error { return c.client.RemoveContainer(opts) } +// UploadToContainer uploads files to a docker container func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, path string) error { log.Infof("| Uploading files to container %.12s", containerID) @@ -336,6 +348,7 @@ func (c *DockerClient) UploadToContainer(containerID string, stream io.Reader, p return c.client.UploadToContainer(containerID, opts) } +// TagImage adds tag to the image func (c *DockerClient) TagImage(imageID, imageName string) error { img := imagename.NewFromString(imageName) @@ -352,6 +365,7 @@ func (c *DockerClient) TagImage(imageID, imageName string) error { return c.client.TagImage(imageID, opts) } +// PushImage pushes the image func (c *DockerClient) PushImage(imageName string) error { var ( img = imagename.NewFromString(imageName) @@ -390,10 +404,12 @@ func (c *DockerClient) PushImage(imageName string) error { return <-errch } +// ResolveHostPath proxy for the dockerclient.ResolveHostPath func (c *DockerClient) ResolveHostPath(path string) (resultPath string, err error) { return dockerclient.ResolveHostPath(path, c.client) } +// EnsureImage checks if the image exists and pulls if not func (c *DockerClient) EnsureImage(imageName string) (err error) { var img *docker.Image @@ -407,6 +423,8 @@ func (c *DockerClient) EnsureImage(imageName string) (err error) { return c.PullImage(imageName) } +// EnsureContainer checks if container with specified name exists +// and creates it otherwise func (c *DockerClient) EnsureContainer(containerName string, config *docker.Config, purpose string) (containerID string, err error) { // Check if container exists diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 46442c54..3f8b667f 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -32,10 +32,7 @@ import ( "github.com/fsouza/go-dockerclient" ) -const ( - COMMIT_SKIP = "COMMIT_SKIP" -) - +// ConfigCommand configuration parameters for any command type ConfigCommand struct { name string args []string @@ -45,6 +42,7 @@ type ConfigCommand struct { isOnbuild bool } +// Command interface describes and command that is executed by build type Command interface { // Execute does the command execution and returns modified state. // Note that here we use State not by reference because we want @@ -59,6 +57,7 @@ type Command interface { String() string } +// NewCommand make a new command according to the configuration given func NewCommand(cfg ConfigCommand) (cmd Command, err error) { // TODO: use reflection? 
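// Hand-rolled dispatch: each Rockerfile instruction name is mapped to its
// Command implementation (for example "from" -> CommandFrom, "run" -> CommandRun,
// "mount" -> CommandMount, "export" -> CommandExport). The case bodies are not
// shown in this hunk; presumably an instruction name with no matching case makes
// NewCommand return an error, so a typo in a Rockerfile fails fast instead of
// being silently skipped.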
switch cfg.name { @@ -118,14 +117,17 @@ type CommandFrom struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandFrom) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandFrom) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandFrom) Execute(b *Build) (s State, err error) { // TODO: for "scratch" image we may use /images/create @@ -196,14 +198,17 @@ type CommandMaintainer struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandMaintainer) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandMaintainer) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandMaintainer) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("MAINTAINER requires exactly one argument") @@ -214,20 +219,23 @@ func (c *CommandMaintainer) Execute(b *Build) (State, error) { return b.state, nil } -// CommandReset cleans the builder state before the next FROM +// CommandCleanup cleans the builder state before the next FROM type CommandCleanup struct { final bool tagged bool } +// String returns the human readable string representation of the command func (c *CommandCleanup) String() string { return "Cleaning up" } +// ShouldRun returns true if the command should be executed func (c *CommandCleanup) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandCleanup) Execute(b *Build) (State, error) { s := b.state @@ -257,14 +265,17 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { // CommandCommit commits collected changes type CommandCommit struct{} +// String returns the human readable string representation of the command func (c *CommandCommit) String() string { return "Commit changes" } +// ShouldRun returns true if the command should be executed func (c *CommandCommit) ShouldRun(b *Build) (bool, error) { return b.state.GetCommits() != "", nil } +// Execute runs the command func (c *CommandCommit) Execute(b *Build) (s State, err error) { s = b.state @@ -338,14 +349,17 @@ type CommandRun struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandRun) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandRun) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandRun) Execute(b *Build) (s State, err error) { s = b.state @@ -396,15 +410,18 @@ type CommandAttach struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandAttach) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandAttach) ShouldRun(b *Build) (bool, error) { // TODO: skip attach? 
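// One possible refinement hinted at by the TODO above (not implemented here):
// ShouldRun could return b.cfg.Attach, so that ATTACH instructions become no-ops
// unless the build was started with the --attach flag. As written, ShouldRun
// always reports true for ATTACH.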
return true, nil } +// Execute runs the command func (c *CommandAttach) Execute(b *Build) (s State, err error) { s = b.state @@ -463,14 +480,17 @@ type CommandEnv struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandEnv) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandEnv) Execute(b *Build) (s State, err error) { s = b.state @@ -517,14 +537,17 @@ type CommandLabel struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandLabel) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandLabel) Execute(b *Build) (s State, err error) { s = b.state @@ -565,14 +588,17 @@ type CommandWorkdir struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandWorkdir) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { s = b.state @@ -600,14 +626,17 @@ type CommandCmd struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandCmd) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandCmd) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandCmd) Execute(b *Build) (s State, err error) { s = b.state @@ -633,14 +662,17 @@ type CommandEntrypoint struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandEntrypoint) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandEntrypoint) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { s = b.state @@ -675,14 +707,17 @@ type CommandExpose struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandExpose) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandExpose) Execute(b *Build) (s State, err error) { s = b.state @@ -726,14 +761,17 @@ type CommandVolume struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandVolume) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandVolume) Execute(b *Build) (s State, err error) { s = b.state @@ -763,14 +801,17 @@ type CommandUser struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandUser) String() string { return c.cfg.original } +// ShouldRun returns true if 
the command should be executed func (c *CommandUser) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandUser) Execute(b *Build) (s State, err error) { s = b.state @@ -791,14 +832,17 @@ type CommandOnbuild struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandOnbuild) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandOnbuild) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandOnbuild) Execute(b *Build) (s State, err error) { s = b.state @@ -828,14 +872,17 @@ type CommandTag struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandTag) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandTag) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandTag) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("TAG requires exactly one argument") @@ -857,14 +904,17 @@ type CommandPush struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandPush) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandPush) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandPush) Execute(b *Build) (State, error) { if len(c.cfg.args) != 1 { return b.state, fmt.Errorf("PUSH requires exactly one argument") @@ -895,14 +945,17 @@ type CommandCopy struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandCopy) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandCopy) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { return b.state, fmt.Errorf("COPY requires at least two arguments") @@ -916,14 +969,17 @@ type CommandAdd struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandAdd) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandAdd) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { return b.state, fmt.Errorf("ADD requires at least two arguments") @@ -936,14 +992,17 @@ type CommandMount struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandMount) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandMount) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandMount) Execute(b *Build) (s State, err error) { s = b.state @@ -1011,14 +1070,17 @@ type CommandExport struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandExport) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandExport) ShouldRun(b *Build) (bool, error) { return true, 
nil } +// Execute runs the command func (c *CommandExport) Execute(b *Build) (s State, err error) { s = b.state @@ -1066,13 +1128,13 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { } // Remember original stuff so we can restore it when we finished - var exportsId string + var exportsID string origState := s defer func() { s = origState - s.ExportsID = exportsId - b.exports = append(b.exports, exportsId) + s.ExportsID = exportsID + b.exports = append(b.exports, exportsID) }() // Append exports container as a volume @@ -1090,14 +1152,14 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - if exportsId, err = b.client.CreateContainer(s); err != nil { + if exportsID, err = b.client.CreateContainer(s); err != nil { return s, err } - defer b.client.RemoveContainer(exportsId) + defer b.client.RemoveContainer(exportsID) - log.Infof("| Running in %.12s: %s", exportsId, strings.Join(cmd, " ")) + log.Infof("| Running in %.12s: %s", exportsID, strings.Join(cmd, " ")) - if err = b.client.RunContainer(exportsId, false); err != nil { + if err = b.client.RunContainer(exportsID, false); err != nil { return s, err } @@ -1109,14 +1171,17 @@ type CommandImport struct { cfg ConfigCommand } +// String returns the human readable string representation of the command func (c *CommandImport) String() string { return c.cfg.original } +// ShouldRun returns true if the command should be executed func (c *CommandImport) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandImport) Execute(b *Build) (s State, err error) { s = b.state args := c.cfg.args @@ -1208,14 +1273,17 @@ type CommandOnbuildWrap struct { cmd Command } +// String returns the human readable string representation of the command func (c *CommandOnbuildWrap) String() string { return "ONBUILD " + c.cmd.String() } +// ShouldRun returns true if the command should be executed func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { return true, nil } +// Execute runs the command func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { return c.cmd.Execute(b) } diff --git a/src/rocker/build/dockerignore.go b/src/rocker/build/dockerignore.go index 265ff5fb..31a0aaa1 100644 --- a/src/rocker/build/dockerignore.go +++ b/src/rocker/build/dockerignore.go @@ -28,9 +28,10 @@ import ( // TODO: maybe move some stuff from copy.go here var ( - DockerignoreCommendRegexp = regexp.MustCompile("\\s*#.*") + dockerignoreCommentRegexp = regexp.MustCompile("\\s*#.*") ) +// ReadDockerignoreFile reads and parses .dockerignore file func ReadDockerignoreFile(file string) ([]string, error) { fd, err := os.Open(file) if err != nil { @@ -41,6 +42,7 @@ func ReadDockerignoreFile(file string) ([]string, error) { return ReadDockerignore(fd) } +// ReadDockerignore reads and parses .dockerignore file from io.Reader func ReadDockerignore(r io.Reader) ([]string, error) { var ( scanner = bufio.NewScanner(r) @@ -50,7 +52,7 @@ func ReadDockerignore(r io.Reader) ([]string, error) { for scanner.Scan() { // Strip comments line := scanner.Text() - line = DockerignoreCommendRegexp.ReplaceAllString(line, "") + line = dockerignoreCommentRegexp.ReplaceAllString(line, "") // Eliminate leading and trailing whitespace. 
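// A small worked example of this parsing: the line
//   "*.log   # keep logs out of the context"
// first has the comment (and the whitespace before it) stripped by
// dockerignoreCommentRegexp, leaving "*.log", which the TrimSpace below leaves
// unchanged. A line that is only a comment or only whitespace trims down to the
// empty string and is skipped by the emptiness check that follows.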
pattern := strings.TrimSpace(line) if pattern == "" { diff --git a/src/rocker/build/plan.go b/src/rocker/build/plan.go index dd2792fa..b2a209d7 100644 --- a/src/rocker/build/plan.go +++ b/src/rocker/build/plan.go @@ -18,8 +18,10 @@ package build import "strings" +// Plan is the list of commands to be executed sequentially by a build process type Plan []Command +// NewPlan makes a new plan out of the list of commands from a Rockerfile func NewPlan(commands []ConfigCommand, finalCleanup bool) (plan Plan, err error) { plan = Plan{} diff --git a/src/rocker/build/rockerfile.go b/src/rocker/build/rockerfile.go index d5041400..35be210d 100644 --- a/src/rocker/build/rockerfile.go +++ b/src/rocker/build/rockerfile.go @@ -27,6 +27,7 @@ import ( "strings" ) +// Rockerfile represents the data structure of a Rockerfile type Rockerfile struct { Name string Source string @@ -37,6 +38,7 @@ type Rockerfile struct { rootNode *parser.Node } +// NewRockerfileFromFile reads and parses Rockerfile from a file func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { fd, err := os.Open(name) if err != nil { @@ -47,6 +49,7 @@ func NewRockerfileFromFile(name string, vars template.Vars, funs template.Funs) return NewRockerfile(name, fd, vars, funs) } +// NewRockerfile reads parses Rockerfile from an io.Reader func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template.Funs) (r *Rockerfile, err error) { r = &Rockerfile{ Name: name, @@ -80,6 +83,7 @@ func NewRockerfile(name string, in io.Reader, vars template.Vars, funs template. return r, nil } +// Commands returns the list of command configurations from the Rockerfile func (r *Rockerfile) Commands() []ConfigCommand { commands := []ConfigCommand{} diff --git a/src/rocker/build/state.go b/src/rocker/build/state.go index dcd89a57..b5e884bd 100644 --- a/src/rocker/build/state.go +++ b/src/rocker/build/state.go @@ -24,6 +24,8 @@ import ( "github.com/fsouza/go-dockerclient" ) +// State is the build state +// TODO: document type State struct { Config docker.Config HostConfig docker.HostConfig @@ -40,22 +42,27 @@ type State struct { Dockerignore []string } +// NewState makes a fresh state func NewState(b *Build) State { return State{ Dockerignore: b.cfg.Dockerignore, } } +// Commit adds a commit to the current state func (s *State) Commit(msg string, args ...interface{}) *State { s.Commits = append(s.Commits, fmt.Sprintf(msg, args...)) sort.Strings(s.Commits) return s } +// GetCommits returns merged commits string func (s State) GetCommits() string { return strings.Join(s.Commits, "; ") } +// Equals returns true if the two states are equal +// NOTE: we identify unique commands by commits, so state uniqueness is simply a commit func (s State) Equals(s2 State) bool { // TODO: compare other properties? return s.GetCommits() == s2.GetCommits() diff --git a/src/rocker/build/tar_windows.go b/src/rocker/build/tar_windows.go index f2c29f37..872ec798 100644 --- a/src/rocker/build/tar_windows.go +++ b/src/rocker/build/tar_windows.go @@ -16,7 +16,7 @@ import ( "strings" ) -// canonicalTarNameForPath returns platform-specific filepath +// CanonicalTarNameForPath returns platform-specific filepath // to canonical posix-style path for tar archival. p is relative // path. 
func CanonicalTarNameForPath(p string) (string, error) { diff --git a/src/rocker/template/template.go b/src/rocker/template/template.go index 2a329098..54ad0195 100644 --- a/src/rocker/template/template.go +++ b/src/rocker/template/template.go @@ -32,6 +32,7 @@ import ( "github.com/kr/pretty" ) +// Funs is the list of additional helpers that may be given to the template type Funs map[string]interface{} // Process renders config through the template processor. From d133e24b2bf9ae2f1f785f2dd6ec3e5e2d5647ec Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 30 Sep 2015 10:15:31 +0300 Subject: [PATCH 083/131] remove "rocker show" --- src/cmd/rocker/main.go | 103 ----------------------------------------- 1 file changed, 103 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 1354102a..d77d9fbe 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -17,7 +17,6 @@ package main import ( - "encoding/json" "fmt" "os" "path/filepath" @@ -25,7 +24,6 @@ import ( "rocker/build" "rocker/dockerclient" - "rocker/imagename" "rocker/template" "github.com/codegangsta/cli" @@ -143,17 +141,6 @@ func main() { Action: buildCommand, Flags: buildFlags, }, - { - Name: "show", - Usage: "shows information about any image", - Action: showCommand, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "json", - Usage: "print output in json", - }, - }, - }, { Name: "clean", Usage: "complete a task on the list", @@ -332,96 +319,6 @@ func buildCommand(c *cli.Context) { // } } -func showCommand(c *cli.Context) { - dockerClient, err := dockerclient.NewFromCli(c) - if err != nil { - log.Fatal(err) - } - - // Initialize context dir - args := c.Args() - if len(args) == 0 { - log.Fatal("Missing image argument") - } - //parse parameter to name - imageName := imagename.NewFromString(args[0]) - infos := []*build.RockerImageData{} - - if imageName.IsStrict() { - image, err := dockerClient.InspectImage(args[0]) - if err != nil && err.Error() == "no such image" { - image, err = imagename.RegistryGet(imageName) - if err != nil { - log.Fatal(err) - } - } else if err != nil { - log.Fatal(err) - } - info, err := toInfo(imageName, image) - if err != nil { - log.Fatal(err) - } - infos = append(infos, info) - } else { - images, err := imagename.RegistryListTags(imageName) - if err != nil { - log.Fatal(err) - } - - type resp struct { - name *imagename.ImageName - image *docker.Image - err error - } - chResp := make(chan resp, len(images)) - - for _, img := range images { - go func(img *imagename.ImageName) { - r := resp{name: img} - r.image, r.err = imagename.RegistryGet(img) - chResp <- r - }(img) - } - - for _ = range images { - r := <-chResp - if r.err != nil { - log.Println(r.err) - } else if info, err := toInfo(r.name, r.image); err == nil { - infos = append(infos, info) - } - } - } - - if c.Bool("json") { - res, err := json.Marshal(infos) - if err != nil { - log.Fatal(err) - } - fmt.Println(string(res)) - } else { - for _, res := range infos { - fmt.Println(res.PrettyString()) - } - } -} - -func toInfo(name *imagename.ImageName, image *docker.Image) (*build.RockerImageData, error) { - data := &build.RockerImageData{} - - if image.Config != nil { - if _, ok := image.Config.Labels["rocker-data"]; ok { - if err := json.Unmarshal([]byte(image.Config.Labels["rocker-data"]), data); err != nil { - return nil, err - } - } - data.Created = image.Created - } - - data.ImageName = name - return data, nil -} - func cleanCommand(c *cli.Context) { verbose := c.Bool("verbose") fmt.Println("verbose") From 
68f2b8e356be7219d5b6b58a027287b61f591c6d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 30 Sep 2015 10:46:19 +0300 Subject: [PATCH 084/131] also support `-D` to debug --- src/cmd/rocker/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index d77d9fbe..b4ed0e98 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -69,7 +69,7 @@ func main() { app.Flags = append([]cli.Flag{ cli.BoolFlag{ - Name: "verbose, vv", + Name: "verbose, vv, D", }, cli.BoolFlag{ Name: "json", From 8452238c5ae029f302fda50f38904346b76f35b4 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 30 Sep 2015 10:47:38 +0300 Subject: [PATCH 085/131] publish artifacts --- src/cmd/rocker/main.go | 23 ++++++++++---------- src/rocker/build/build.go | 23 ++++++++++---------- src/rocker/build/build_test.go | 4 ++-- src/rocker/build/client.go | 32 +++++++++++++++++++++------- src/rocker/build/commands.go | 35 ++++++++++++++++++++++++++++++- src/rocker/build/commands_test.go | 2 +- 6 files changed, 85 insertions(+), 34 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index b4ed0e98..6e156db8 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -265,17 +265,18 @@ func buildCommand(c *cli.Context) { } builder := build.New(client, rockerfile, cache, build.Config{ - InStream: os.Stdin, - OutStream: os.Stdout, - ContextDir: contextDir, - Dockerignore: dockerignore, - Pull: c.Bool("pull"), - NoGarbage: c.Bool("no-garbage"), - Attach: c.Bool("attach"), - Verbose: c.GlobalBool("verbose"), - ID: c.String("id"), - NoCache: c.Bool("no-cache"), - Push: c.Bool("push"), + InStream: os.Stdin, + OutStream: os.Stdout, + ContextDir: contextDir, + Dockerignore: dockerignore, + ArtifactsPath: c.String("artifacts-path"), + Pull: c.Bool("pull"), + NoGarbage: c.Bool("no-garbage"), + Attach: c.Bool("attach"), + Verbose: c.GlobalBool("verbose"), + ID: c.String("id"), + NoCache: c.Bool("no-cache"), + Push: c.Bool("push"), }) plan, err := build.NewPlan(rockerfile.Commands(), true) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index 456fe644..ddffde8a 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -45,17 +45,18 @@ var ( // Config used specify parameters for the builder in New() type Config struct { - OutStream io.Writer - InStream io.ReadCloser - ContextDir string - ID string - Dockerignore []string - Pull bool - NoGarbage bool - Attach bool - Verbose bool - NoCache bool - Push bool + OutStream io.Writer + InStream io.ReadCloser + ContextDir string + ID string + Dockerignore []string + ArtifactsPath string + Pull bool + NoGarbage bool + Attach bool + Verbose bool + NoCache bool + Push bool } // Build is the main object that processes build diff --git a/src/rocker/build/build_test.go b/src/rocker/build/build_test.go index ba963a52..9c02eb71 100644 --- a/src/rocker/build/build_test.go +++ b/src/rocker/build/build_test.go @@ -74,9 +74,9 @@ func (m *MockClient) TagImage(imageID, imageName string) error { return args.Error(0) } -func (m *MockClient) PushImage(imageName string) error { +func (m *MockClient) PushImage(imageName string) (string, error) { args := m.Called(imageName) - return args.Error(0) + return args.String(0), args.Error(1) } func (m *MockClient) CreateContainer(state State) (string, error) { diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index ebcd4758..841ab6fa 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -17,11 
+17,13 @@ package build import ( + "bytes" "fmt" "io" "os" "os/signal" + "regexp" "rocker/dockerclient" "rocker/imagename" @@ -41,7 +43,7 @@ type Client interface { PullImage(name string) error RemoveImage(imageID string) error TagImage(imageID, imageName string) error - PushImage(imageName string) error + PushImage(imageName string) (digest string, err error) EnsureImage(imageName string) error CreateContainer(state State) (id string, err error) RunContainer(containerID string, attachStdin bool) error @@ -58,6 +60,10 @@ type DockerClient struct { auth docker.AuthConfiguration } +var ( + captureDigest = regexp.MustCompile("digest:\\s*(sha256:[a-f0-9]{64})") +) + // NewDockerClient makes a new client that works with a docker socket func NewDockerClient(dockerClient *docker.Client, auth docker.AuthConfiguration) *DockerClient { return &DockerClient{ @@ -366,12 +372,13 @@ func (c *DockerClient) TagImage(imageID, imageName string) error { } // PushImage pushes the image -func (c *DockerClient) PushImage(imageName string) error { +func (c *DockerClient) PushImage(imageName string) (digest string, err error) { var ( - img = imagename.NewFromString(imageName) - errch = make(chan error) + img = imagename.NewFromString(imageName) + buf bytes.Buffer pipeReader, pipeWriter = io.Pipe() + outStream = io.MultiWriter(pipeWriter, &buf) def = log.StandardLogger() fdOut, isTerminalOut = term.GetFdInfo(def.Out) out = def.Out @@ -380,7 +387,7 @@ func (c *DockerClient) PushImage(imageName string) error { Name: img.NameWithRegistry(), Tag: img.GetTag(), Registry: img.Registry, - OutputStream: pipeWriter, + OutputStream: outStream, RawJSONStream: true, } ) @@ -394,14 +401,23 @@ func (c *DockerClient) PushImage(imageName string) error { log.Debugf("Push with options: %# v", opts) go func() { - errch <- jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut) + if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut); err != nil { + log.Errorf("Failed to process json stream, error %s", err) + } }() if err := c.client.PushImage(opts, c.auth); err != nil { - return err + return "", err } + pipeWriter.Close() - return <-errch + // It is the best way to have pushed image digest so far + matches := captureDigest.FindStringSubmatch(buf.String()) + if len(matches) > 0 { + digest = matches[1] + } + + return digest, nil } // ResolveHostPath proxy for the dockerclient.ResolveHostPath diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 3f8b667f..7062f6b8 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -18,10 +18,12 @@ package build import ( "fmt" + "io/ioutil" "os" "path" "path/filepath" "regexp" + "rocker/imagename" "rocker/util" "sort" "strings" @@ -30,6 +32,7 @@ import ( "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" + "github.com/kr/pretty" ) // ConfigCommand configuration parameters for any command @@ -933,10 +936,40 @@ func (c *CommandPush) Execute(b *Build) (State, error) { return b.state, nil } - if err := b.client.PushImage(c.cfg.args[0]); err != nil { + image := imagename.NewFromString(c.cfg.args[0]) + + digest, err := b.client.PushImage(image.String()) + if err != nil { return b.state, err } + if b.cfg.ArtifactsPath == "" { + return b.state, nil + } + + // Publish artifact files + + if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { + return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", 
b.cfg.ArtifactsPath, err) + } + filePath := filepath.Join(b.cfg.ArtifactsPath, image.GetTag()) + lines := []string{ + fmt.Sprintf("Name: %s", image), + fmt.Sprintf("Tag: %s", image.GetTag()), + fmt.Sprintf("ImageID: %s", b.state.ImageID), + fmt.Sprintf("Digest: %s", digest), + fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), + } + + content := []byte(strings.Join(lines, "\n") + "\n") + + if err := ioutil.WriteFile(filePath, content, 0644); err != nil { + return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) + } + + log.Infof("| Saved artifact file %s", filePath) + log.Debugf("Artifact properties: %# v", pretty.Formatter(lines)) + return b.state, nil } diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index 78a156bb..ce514e67 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -652,7 +652,7 @@ func TestCommandPush_Simple(t *testing.T) { b.state.ImageID = "123" c.On("TagImage", "123", "docker.io/grammarly/rocker:1.0").Return(nil).Once() - c.On("PushImage", "docker.io/grammarly/rocker:1.0").Return(nil).Once() + c.On("PushImage", "docker.io/grammarly/rocker:1.0").Return("sha256:fafa", nil).Once() _, err := cmd.Execute(b) if err != nil { From c3b08b826e479f1d9f9c39fb7de870723df3812b Mon Sep 17 00:00:00 2001 From: Vsevolod Polyakov Date: Thu, 1 Oct 2015 08:24:17 +0300 Subject: [PATCH 086/131] ability to create artifacts without push images to regestry --- src/rocker/build/commands.go | 64 +++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 7062f6b8..e0251d89 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -931,45 +931,49 @@ func (c *CommandPush) Execute(b *Build) (State, error) { return b.state, err } - if !b.cfg.Push { - log.Infof("| Don't push. Pass --push flag to actually push to the registry") - return b.state, nil - } - - image := imagename.NewFromString(c.cfg.args[0]) - - digest, err := b.client.PushImage(image.String()) - if err != nil { - return b.state, err + var image *imagename.ImageName + var lines []string + + if b.cfg.Push || b.cfg.ArtifactsPath != "" { + image = imagename.NewFromString(c.cfg.args[0]) + lines = []string{ + fmt.Sprintf("Name: %s", image), + fmt.Sprintf("Tag: %s", image.GetTag()), + fmt.Sprintf("ImageID: %s", b.state.ImageID), + } } - if b.cfg.ArtifactsPath == "" { - return b.state, nil + // push image and add some lines to artifacts + if b.cfg.Push { + digest, err := b.client.PushImage(image.String()) + if err != nil { + return b.state, err + } + lines = append( + lines, + fmt.Sprintf("Digest: %s", digest), + fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), + ) + } else { + log.Infof("| Don't push. 
Pass --push flag to actually push to the registry") } // Publish artifact files + if b.cfg.ArtifactsPath != "" { + if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { + return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) + } + filePath := filepath.Join(b.cfg.ArtifactsPath, image.GetTag()) - if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { - return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) - } - filePath := filepath.Join(b.cfg.ArtifactsPath, image.GetTag()) - lines := []string{ - fmt.Sprintf("Name: %s", image), - fmt.Sprintf("Tag: %s", image.GetTag()), - fmt.Sprintf("ImageID: %s", b.state.ImageID), - fmt.Sprintf("Digest: %s", digest), - fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), - } - - content := []byte(strings.Join(lines, "\n") + "\n") + content := []byte(strings.Join(lines, "\n") + "\n") - if err := ioutil.WriteFile(filePath, content, 0644); err != nil { - return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) + if err := ioutil.WriteFile(filePath, content, 0644); err != nil { + return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) + } + log.Infof("| Saved artifact file %s", filePath) + log.Debugf("Artifact properties: %# v", pretty.Formatter(lines)) } - log.Infof("| Saved artifact file %s", filePath) - log.Debugf("Artifact properties: %# v", pretty.Formatter(lines)) - return b.state, nil } From 1d306b6e86280082ecb867842daa9f88ee883177 Mon Sep 17 00:00:00 2001 From: Vsevolod Polyakov Date: Thu, 1 Oct 2015 08:56:15 +0300 Subject: [PATCH 087/131] refactored PUSH artifacts and add Pushed property --- src/rocker/build/commands.go | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index e0251d89..88dfa735 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -931,16 +931,11 @@ func (c *CommandPush) Execute(b *Build) (State, error) { return b.state, err } - var image *imagename.ImageName - var lines []string - - if b.cfg.Push || b.cfg.ArtifactsPath != "" { - image = imagename.NewFromString(c.cfg.args[0]) - lines = []string{ - fmt.Sprintf("Name: %s", image), - fmt.Sprintf("Tag: %s", image.GetTag()), - fmt.Sprintf("ImageID: %s", b.state.ImageID), - } + image := imagename.NewFromString(c.cfg.args[0]) + artifactProps := []string{ + fmt.Sprintf("Name: %s", image), + fmt.Sprintf("Tag: %s", image.GetTag()), + fmt.Sprintf("ImageID: %s", b.state.ImageID), } // push image and add some lines to artifacts @@ -949,8 +944,8 @@ func (c *CommandPush) Execute(b *Build) (State, error) { if err != nil { return b.state, err } - lines = append( - lines, + artifactProps = append( + artifactProps, fmt.Sprintf("Digest: %s", digest), fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), ) @@ -960,18 +955,22 @@ func (c *CommandPush) Execute(b *Build) (State, error) { // Publish artifact files if b.cfg.ArtifactsPath != "" { + artifactProps = append( + artifactProps, + fmt.Sprintf("Pushed: %t", b.cfg.Push), + ) if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) } filePath := filepath.Join(b.cfg.ArtifactsPath, image.GetTag()) - content := []byte(strings.Join(lines, "\n") + "\n") + content := 
[]byte(strings.Join(artifactProps, "\n") + "\n") if err := ioutil.WriteFile(filePath, content, 0644); err != nil { return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) } log.Infof("| Saved artifact file %s", filePath) - log.Debugf("Artifact properties: %# v", pretty.Formatter(lines)) + log.Debugf("Artifact properties: %# v", pretty.Formatter(artifactProps)) } return b.state, nil From de13cfcdff5d7caa5a27425549b414f7e0bc6c38 Mon Sep 17 00:00:00 2001 From: Vsevolod Polyakov Date: Thu, 1 Oct 2015 09:22:02 +0300 Subject: [PATCH 088/131] refactor CommandPush artifact saving --- src/rocker/build/commands.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 88dfa735..33a4fcdb 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -934,6 +934,7 @@ func (c *CommandPush) Execute(b *Build) (State, error) { image := imagename.NewFromString(c.cfg.args[0]) artifactProps := []string{ fmt.Sprintf("Name: %s", image), + fmt.Sprintf("Pushed: %t", b.cfg.Push), fmt.Sprintf("Tag: %s", image.GetTag()), fmt.Sprintf("ImageID: %s", b.state.ImageID), } @@ -955,10 +956,6 @@ func (c *CommandPush) Execute(b *Build) (State, error) { // Publish artifact files if b.cfg.ArtifactsPath != "" { - artifactProps = append( - artifactProps, - fmt.Sprintf("Pushed: %t", b.cfg.Push), - ) if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) } From abca3443c5ffcc8a0832d0662d30f704aeee924d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 09:20:41 +0300 Subject: [PATCH 089/131] better log text formatter for non-terminal --- src/cmd/rocker/main.go | 29 +++++- src/rocker/build/test_formatter.go | 162 +++++++++++++++++++++++++++++ 2 files changed, 188 insertions(+), 3 deletions(-) create mode 100644 src/rocker/build/test_formatter.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 6e156db8..c807fdce 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -28,6 +28,7 @@ import ( "github.com/codegangsta/cli" "github.com/docker/docker/pkg/units" + "github.com/fatih/color" "github.com/fsouza/go-dockerclient" log "github.com/Sirupsen/logrus" @@ -74,6 +75,9 @@ func main() { cli.BoolFlag{ Name: "json", }, + cli.BoolTFlag{ + Name: "colors", + }, }, dockerclient.GlobalCliParams()...) 
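// Note on the global flags above: cli.BoolTFlag makes --colors default to true,
// while --json and --verbose default to false. initLogs (further down) only honors
// an explicit --colors / --colors=false when GlobalIsSet reports the flag was
// actually passed; otherwise it falls back to enabling colors only when stdout is
// a terminal and JSON output is off.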
buildFlags := []cli.Flag{ @@ -327,12 +331,31 @@ func cleanCommand(c *cli.Context) { } func initLogs(ctx *cli.Context) { + logger := log.StandardLogger() + if ctx.GlobalBool("verbose") { - log.SetLevel(log.DebugLevel) + logger.Level = log.DebugLevel + } + + var ( + isTerm = log.IsTerminal() + json = ctx.GlobalBool("json") + useColors = isTerm && !json + ) + + if ctx.GlobalIsSet("colors") { + useColors = ctx.GlobalBool("colors") } - if ctx.GlobalBool("json") { - log.SetFormatter(&log.JSONFormatter{}) + color.NoColor = !useColors + + if json { + logger.Formatter = &log.JSONFormatter{} + } else { + formatter := &build.TextFormatter{} + formatter.DisableColors = !useColors + + logger.Formatter = formatter } } diff --git a/src/rocker/build/test_formatter.go b/src/rocker/build/test_formatter.go new file mode 100644 index 00000000..67ccedf2 --- /dev/null +++ b/src/rocker/build/test_formatter.go @@ -0,0 +1,162 @@ +// The MIT License (MIT) +// Copyright (c) 2014 Simon Eskildsen +// NOTE: modified to support no-color mode that is more human readable + +package build + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" + + log "github.com/Sirupsen/logrus" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = log.IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *log.Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + if f.TimestampFormat == "" { + f.TimestampFormat = log.DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys) + } else { + f.printUncolored(b, entry, keys) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *log.Entry, keys []string) { + var levelColor int + switch entry.Level { + case log.DebugLevel: + levelColor = gray + case log.WarnLevel: + levelColor = yellow + case log.ErrorLevel, log.FatalLevel, log.PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + } +} + +func (f *TextFormatter) printUncolored(b *bytes.Buffer, entry *log.Entry, keys []string) { + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "%s[%04d] %-44s ", levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "%s[%s] %-44s ", levelText, entry.Time.Format(f.TimestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " %s=%+v", k, v) + } +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
+func prefixFieldClashes(data log.Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} From 69cabb76b674af02c0b52b517a3e735aa3766579 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 09:46:57 +0300 Subject: [PATCH 090/131] fix CommandCommit shadowing error in defer --- src/rocker/build/commands.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 33a4fcdb..21bbe087 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -321,7 +321,7 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { defer func(id string) { s.Commits = []string{} - if err = b.client.RemoveContainer(id); err != nil { + if err := b.client.RemoveContainer(id); err != nil { log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) } }(s.ContainerID) From 9d164284e488be20b6c0c3c88ae06fef81b96bc4 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 09:47:20 +0300 Subject: [PATCH 091/131] fix CommandCommit not returning cache put error --- src/rocker/build/commands.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 21bbe087..ecc8d016 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -337,7 +337,9 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { s.ProducedImage = true if b.cache != nil { - b.cache.Put(s) + if err := b.cache.Put(s); err != nil { + return s, err + } } // Store some stuff to the build From 100a7f8b637248aa4425a5d6cbbee3424c8a802c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 09:47:38 +0300 Subject: [PATCH 092/131] configurable cache directory --- src/cmd/rocker/main.go | 13 +++++++++++-- src/rocker/util/filepath.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index c807fdce..25ce6bb4 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -25,6 +25,7 @@ import ( "rocker/build" "rocker/dockerclient" "rocker/template" + "rocker/util" "github.com/codegangsta/cli" "github.com/docker/docker/pkg/units" @@ -100,6 +101,11 @@ func main() { Name: "no-cache", Usage: "supresses cache for docker builds", }, + cli.StringFlag{ + Name: "cache-dir", + Value: "~/.rocker_cache", + Usage: "Set the directory where the cache will be stored", + }, cli.BoolFlag{ Name: "no-reuse", Usage: "suppresses reuse for all the volumes in the build", @@ -264,8 +270,11 @@ func buildCommand(c *cli.Context) { var cache build.Cache if !c.Bool("no-cache") { - // TODO: configurable cache dir - cache = build.NewCacheFS(os.Getenv("HOME") + "/.rocker_cache") + cacheDir, err := util.MakeAbsolute(c.String("cache-dir")) + if err != nil { + log.Fatal(err) + } + cache = build.NewCacheFS(cacheDir) } builder := build.New(client, rockerfile, cache, build.Config{ diff --git a/src/rocker/util/filepath.go b/src/rocker/util/filepath.go index 346138e3..2e02d92c 100644 --- a/src/rocker/util/filepath.go +++ b/src/rocker/util/filepath.go @@ -18,7 +18,10 @@ package util import ( "fmt" + "os" + "os/user" "path" + "path/filepath" "strings" ) @@ -43,3 +46,33 @@ func ResolvePath(baseDir, subPath string) (resultPath string, err error) { return resultPath, 
nil } + +// MakeAbsolute makes any path absolute, either according to a HOME or from a working directory +func MakeAbsolute(path string) (result string, err error) { + result = filepath.Clean(path) + if filepath.IsAbs(result) { + return result, nil + } + + if strings.HasPrefix(result, "~/") || result == "~" { + home := os.Getenv("HOME") + + // fallback to system user info + if home == "" { + usr, err := user.Current() + if err != nil { + return "", err + } + home = usr.HomeDir + } + + return home + result[1:], nil + } + + wd, err := os.Getwd() + if err != nil { + return "", err + } + + return filepath.Join(wd, path), nil +} From af7c026f930783c0c205fba6f701227262420165 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 09:55:49 +0300 Subject: [PATCH 093/131] text_formatter: satisfy linter --- src/rocker/build/{test_formatter.go => text_formatter.go} | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) rename src/rocker/build/{test_formatter.go => text_formatter.go} (93%) diff --git a/src/rocker/build/test_formatter.go b/src/rocker/build/text_formatter.go similarity index 93% rename from src/rocker/build/test_formatter.go rename to src/rocker/build/text_formatter.go index 67ccedf2..13d928da 100644 --- a/src/rocker/build/test_formatter.go +++ b/src/rocker/build/text_formatter.go @@ -38,6 +38,7 @@ func miniTS() int { return int(time.Since(baseTimestamp) / time.Second) } +// TextFormatter is a formatter for logrus that can print colored and uncolored human readable log messages type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool @@ -62,8 +63,10 @@ type TextFormatter struct { DisableSorting bool } +// Format formats log message string, it checks if the output should be colored +// and doest a particular formatting func (f *TextFormatter) Format(entry *log.Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) + var keys = make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } From d971358d309985d33b5a9596de498227d663f5b7 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 11:05:36 +0300 Subject: [PATCH 094/131] Make non cacheable struct in state So things such as .dockerignore will not be overridden by cached commands that carry the old dockerignore content --- src/rocker/build/build.go | 13 +++++------ src/rocker/build/client.go | 2 +- src/rocker/build/commands.go | 36 +++++++++++++++---------------- src/rocker/build/commands_test.go | 10 ++++----- src/rocker/build/copy.go | 6 +++--- src/rocker/build/state.go | 32 ++++++++++++++++++--------- 6 files changed, 56 insertions(+), 43 deletions(-) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index ddffde8a..a8cbacd9 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -146,7 +146,7 @@ func (b *Build) GetImageID() string { } func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { - if b.cache == nil || s.CacheBusted { + if b.cache == nil || s.NoCache.CacheBusted { return s, false, nil } @@ -155,7 +155,7 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { return s, false, err } if s2 == nil { - s.CacheBusted = true + s.NoCache.CacheBusted = true log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached")) return s, false, nil } @@ -166,7 +166,7 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { } if img == nil { defer b.cache.Del(*s2) - s.CacheBusted = true + 
s.NoCache.CacheBusted = true log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached")) return s, false, nil } @@ -184,9 +184,10 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { b.ProducedSize += img.Size b.VirtualSize = img.VirtualSize - // TODO: maybe move somewhere - s2.Commits = []string{} - s2.CacheBusted = false + // Keep items that should not be cached from the previous state + s2.NoCache = s.NoCache + // We don't want commits to go through the cache + s2.CleanCommits() return *s2, true, nil } diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 841ab6fa..837b412c 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -297,7 +297,7 @@ func (c *DockerClient) RunContainer(containerID string, attachStdin bool) error // CommitContainer commits docker container func (c *DockerClient) CommitContainer(s State, message string) (*docker.Image, error) { commitOpts := docker.CommitContainerOptions{ - Container: s.ContainerID, + Container: s.NoCache.ContainerID, Message: message, Run: &s.Config, } diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index ecc8d016..d252eefd 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -253,7 +253,7 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { s = NewState(b) // Keep some stuff between froms - s.ExportsID = dirtyState.ExportsID + s.NoCache.ExportsID = dirtyState.NoCache.ExportsID // For final cleanup we want to keep imageID if c.final { @@ -292,12 +292,12 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { } // TODO: ? - // if len(commits) == 0 && s.ContainerID == "" { log.Infof("| Skip") + // if len(commits) == 0 && s.NoCache.ContainerID == "" { log.Infof("| Skip") // TODO: verify that we need to check cache in commit only for // a non-container actions - if s.ContainerID == "" { + if s.NoCache.ContainerID == "" { // Check cache var hit bool @@ -312,7 +312,7 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { origCmd := s.Config.Cmd s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + commits} - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } @@ -320,18 +320,18 @@ func (c *CommandCommit) Execute(b *Build) (s State, err error) { } defer func(id string) { - s.Commits = []string{} + s.CleanCommits() if err := b.client.RemoveContainer(id); err != nil { log.Errorf("Failed to remove temporary container %.12s, error: %s", id, err) } - }(s.ContainerID) + }(s.NoCache.ContainerID) var img *docker.Image if img, err = b.client.CommitContainer(s, commits); err != nil { return s, err } - s.ContainerID = "" + s.NoCache.ContainerID = "" s.ParentID = s.ImageID s.ImageID = img.ID s.ProducedImage = true @@ -395,12 +395,12 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { origCmd := s.Config.Cmd s.Config.Cmd = cmd - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } - if err = b.client.RunContainer(s.ContainerID, false); err != nil { - b.client.RemoveContainer(s.ContainerID) + if err = b.client.RunContainer(s.NoCache.ContainerID, false); err != nil { + b.client.RemoveContainer(s.NoCache.ContainerID) return s, err } @@ -468,12 +468,12 @@ func (c *CommandAttach) Execute(b *Build) (s State, err error) { s.Config.AttachStderr = true s.Config.AttachStdout = true - if 
s.ContainerID, err = b.client.CreateContainer(s); err != nil { + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } - if err = b.client.RunContainer(s.ContainerID, true); err != nil { - b.client.RemoveContainer(s.ContainerID) + if err = b.client.RunContainer(s.NoCache.ContainerID, true); err != nil { + b.client.RemoveContainer(s.NoCache.ContainerID) return s, err } @@ -656,7 +656,7 @@ func (c *CommandCmd) Execute(b *Build) (s State, err error) { s.Commit(fmt.Sprintf("CMD %q", cmd)) if len(c.cfg.args) != 0 { - s.CmdSet = true + s.NoCache.CmdSet = true } return s, nil @@ -700,7 +700,7 @@ func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { // TODO: test this // when setting the entrypoint if a CMD was not explicitly set then // set the command to nil - if !s.CmdSet { + if !s.NoCache.CmdSet { s.Config.Cmd = nil } @@ -1158,7 +1158,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { return s, err } if hit { - b.exports = append(b.exports, s.ExportsID) + b.exports = append(b.exports, s.NoCache.ExportsID) return s, nil } @@ -1168,7 +1168,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { defer func() { s = origState - s.ExportsID = exportsID + s.NoCache.ExportsID = exportsID b.exports = append(b.exports, exportsID) }() @@ -1271,7 +1271,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { defer func() { s = origState - s.ContainerID = importID + s.NoCache.ContainerID = importID }() cmd := []string{"/opt/rsync/bin/rsync", "-a"} diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index ce514e67..5f225f95 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -151,7 +151,7 @@ func TestCommandRun_Simple(t *testing.T) { assert.Equal(t, origCmd, b.state.Config.Cmd) assert.Equal(t, origCmd, state.Config.Cmd) assert.Equal(t, "123", state.ImageID) - assert.Equal(t, "456", state.ContainerID) + assert.Equal(t, "456", state.NoCache.ContainerID) } // =========== Testing COMMIT =========== @@ -162,7 +162,7 @@ func TestCommandCommit_Simple(t *testing.T) { resultImage := &docker.Image{ID: "789"} b.state.ImageID = "123" - b.state.ContainerID = "456" + b.state.NoCache.ContainerID = "456" b.state.Commit("a").Commit("b") c.On("CommitContainer", mock.AnythingOfType("State"), "a; b").Return(resultImage, nil).Once() @@ -178,7 +178,7 @@ func TestCommandCommit_Simple(t *testing.T) { assert.Equal(t, "", state.GetCommits()) assert.Equal(t, []string(nil), state.Config.Cmd) assert.Equal(t, "789", state.ImageID) - assert.Equal(t, "", state.ContainerID) + assert.Equal(t, "", state.NoCache.ContainerID) } func TestCommandCommit_NoContainer(t *testing.T) { @@ -206,7 +206,7 @@ func TestCommandCommit_NoContainer(t *testing.T) { assert.Equal(t, "a; b", b.state.GetCommits()) assert.Equal(t, "", state.GetCommits()) assert.Equal(t, "789", state.ImageID) - assert.Equal(t, "", state.ContainerID) + assert.Equal(t, "", state.NoCache.ContainerID) } func TestCommandCommit_NoCommitMsgs(t *testing.T) { @@ -589,7 +589,7 @@ func TestCommandCopy_Simple(t *testing.T) { t.Logf("state: %# v", pretty.Formatter(state)) c.AssertExpectations(t) - assert.Equal(t, "456", state.ContainerID) + assert.Equal(t, "456", state.NoCache.ContainerID) } // =========== Testing TAG =========== diff --git a/src/rocker/build/copy.go b/src/rocker/build/copy.go index 653af545..98aeb567 100644 --- a/src/rocker/build/copy.go +++ b/src/rocker/build/copy.go @@ -64,7 +64,7 @@ func copyFiles(b *Build, args 
[]string, cmdName string) (s State, err error) { src = args[0 : len(args)-1] dest = filepath.FromSlash(args[len(args)-1]) // last one is always the dest u *upload - excludes = s.Dockerignore + excludes = s.NoCache.Dockerignore ) // If destination is not a directory (no leading slash) @@ -117,7 +117,7 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { origCmd := s.Config.Cmd s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message} - if s.ContainerID, err = b.client.CreateContainer(s); err != nil { + if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err } @@ -131,7 +131,7 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { // Copy to "/" because we made the prefix inside the tar archive // Do that because we are not able to reliably create directories inside the container - if err = b.client.UploadToContainer(s.ContainerID, u.tar, "/"); err != nil { + if err = b.client.UploadToContainer(s.NoCache.ContainerID, u.tar, "/"); err != nil { return s, err } diff --git a/src/rocker/build/state.go b/src/rocker/build/state.go index b5e884bd..c9a20707 100644 --- a/src/rocker/build/state.go +++ b/src/rocker/build/state.go @@ -28,25 +28,31 @@ import ( // TODO: document type State struct { Config docker.Config - HostConfig docker.HostConfig + HostConfig docker.HostConfig // TODO: move to NoCache? ImageID string ParentID string - ContainerID string - ExportsID string - Commits []string NoBaseImage bool ProducedImage bool - CmdSet bool - CacheBusted bool InjectCommands []string - Dockerignore []string + Commits []string + + NoCache StateNoCache +} + +// StateNoCache is a struct that cannot be overridden by a cached item +type StateNoCache struct { + Dockerignore []string + CacheBusted bool + CmdSet bool + ExportsID string + ContainerID string } // NewState makes a fresh state func NewState(b *Build) State { - return State{ - Dockerignore: b.cfg.Dockerignore, - } + s := State{} + s.NoCache.Dockerignore = b.cfg.Dockerignore + return s } // Commit adds a commit to the current state @@ -56,6 +62,12 @@ func (s *State) Commit(msg string, args ...interface{}) *State { return s } +// CleanCommits resets the commits struct +func (s *State) CleanCommits() *State { + s.Commits = []string{} + return s +} + // GetCommits returns merged commits string func (s State) GetCommits() string { return strings.Join(s.Commits, "; ") From 643f268f53bdce606ecb55b690c142e8b80ba827 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 11:57:13 +0300 Subject: [PATCH 095/131] state ExportsID should be taken from cache --- src/rocker/build/commands.go | 6 +++--- src/rocker/build/state.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index d252eefd..eab719ba 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -253,7 +253,7 @@ func (c *CommandCleanup) Execute(b *Build) (State, error) { s = NewState(b) // Keep some stuff between froms - s.NoCache.ExportsID = dirtyState.NoCache.ExportsID + s.ExportsID = dirtyState.ExportsID // For final cleanup we want to keep imageID if c.final { @@ -1158,7 +1158,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { return s, err } if hit { - b.exports = append(b.exports, s.NoCache.ExportsID) + b.exports = append(b.exports, s.ExportsID) return s, nil } @@ -1168,7 +1168,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { defer func() { s = origState - 
s.NoCache.ExportsID = exportsID + s.ExportsID = exportsID b.exports = append(b.exports, exportsID) }() diff --git a/src/rocker/build/state.go b/src/rocker/build/state.go index c9a20707..da2ba288 100644 --- a/src/rocker/build/state.go +++ b/src/rocker/build/state.go @@ -31,6 +31,7 @@ type State struct { HostConfig docker.HostConfig // TODO: move to NoCache? ImageID string ParentID string + ExportsID string NoBaseImage bool ProducedImage bool InjectCommands []string @@ -44,7 +45,6 @@ type StateNoCache struct { Dockerignore []string CacheBusted bool CmdSet bool - ExportsID string ContainerID string } From b283f0843843df8e465ce24c472a0c0e3fca100d Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 12:16:28 +0300 Subject: [PATCH 096/131] properly reset entrypoint --- src/rocker/build/commands.go | 2 +- src/rocker/build/commands_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index eab719ba..6749c00b 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -689,7 +689,7 @@ func (c *CommandEntrypoint) Execute(b *Build) (s State, err error) { s.Config.Entrypoint = parsed case len(parsed) == 0: // ENTRYPOINT [] - s.Config.Entrypoint = nil + s.Config.Entrypoint = []string{} default: // ENTRYPOINT echo hi s.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index 5f225f95..0315d3a6 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -442,7 +442,7 @@ func TestCommandEntrypoint_Remove(t *testing.T) { t.Fatal(err) } - assert.Equal(t, []string(nil), state.Config.Entrypoint) + assert.Equal(t, []string{}, state.Config.Entrypoint) } // =========== Testing EXPOSE =========== From ccd809ffe269ba3bd581534ade32b2fafd9989ef Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 12:17:09 +0300 Subject: [PATCH 097/131] param -reload-cache and hit the latest cache match entry --- src/cmd/rocker/main.go | 5 +++++ src/rocker/build/build.go | 8 ++++++++ src/rocker/build/cache.go | 12 ++++++------ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 25ce6bb4..3d88caae 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -101,6 +101,10 @@ func main() { Name: "no-cache", Usage: "supresses cache for docker builds", }, + cli.BoolFlag{ + Name: "reload-cache", + Usage: "removes any cache that hit and save the new one", + }, cli.StringFlag{ Name: "cache-dir", Value: "~/.rocker_cache", @@ -289,6 +293,7 @@ func buildCommand(c *cli.Context) { Verbose: c.GlobalBool("verbose"), ID: c.String("id"), NoCache: c.Bool("no-cache"), + ReloadCache: c.Bool("reload-cache"), Push: c.Bool("push"), }) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index a8cbacd9..06866656 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -56,6 +56,7 @@ type Config struct { Attach bool Verbose bool NoCache bool + ReloadCache bool Push bool } @@ -160,6 +161,13 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { return s, false, nil } + if b.cfg.ReloadCache { + defer b.cache.Del(*s2) + s.NoCache.CacheBusted = true + log.Info(color.New(color.FgYellow).SprintFunc()("| Reload cache")) + return s, false, nil + } + var img *docker.Image if img, err = b.client.InspectImage(s2.ImageID); err != nil { return s, true, err diff --git a/src/rocker/build/cache.go 
b/src/rocker/build/cache.go index 0bf15f3d..888957eb 100644 --- a/src/rocker/build/cache.go +++ b/src/rocker/build/cache.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "os" "path/filepath" + "time" log "github.com/Sirupsen/logrus" ) @@ -48,6 +49,8 @@ func NewCacheFS(root string) *CacheFS { func (c *CacheFS) Get(s State) (res *State, err error) { match := filepath.Join(c.root, s.ImageID) + latestTime := time.Unix(0, 0) + err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error { if err != nil && os.IsNotExist(err) { return nil @@ -67,17 +70,14 @@ func (c *CacheFS) Get(s State) (res *State, err error) { log.Debugf("CACHE COMPARE %s %s %q %q", s.ImageID, s2.ImageID, s.Commits, s2.Commits) - if s.Equals(s2) { + if s.Equals(s2) && info.ModTime().After(latestTime) { + latestTime = info.ModTime() res = &s2 - return filepath.SkipDir } + return nil }) - if err == filepath.SkipDir { - return res, nil - } - return } From 4d0275e3fcdd6460c7fe467678b04721bc19c9c7 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 1 Oct 2015 14:22:57 +0300 Subject: [PATCH 098/131] move text formatter to a separate package --- src/cmd/rocker/main.go | 3 ++- .../text_formatter.go => textformatter/textformatter.go} | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) rename src/rocker/{build/text_formatter.go => textformatter/textformatter.go} (99%) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 3d88caae..0666ef28 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -25,6 +25,7 @@ import ( "rocker/build" "rocker/dockerclient" "rocker/template" + "rocker/textformatter" "rocker/util" "github.com/codegangsta/cli" @@ -366,7 +367,7 @@ func initLogs(ctx *cli.Context) { if json { logger.Formatter = &log.JSONFormatter{} } else { - formatter := &build.TextFormatter{} + formatter := &textformatter.TextFormatter{} formatter.DisableColors = !useColors logger.Formatter = formatter diff --git a/src/rocker/build/text_formatter.go b/src/rocker/textformatter/textformatter.go similarity index 99% rename from src/rocker/build/text_formatter.go rename to src/rocker/textformatter/textformatter.go index 13d928da..18c74c77 100644 --- a/src/rocker/build/text_formatter.go +++ b/src/rocker/textformatter/textformatter.go @@ -2,7 +2,7 @@ // Copyright (c) 2014 Simon Eskildsen // NOTE: modified to support no-color mode that is more human readable -package build +package textformatter import ( "bytes" From 41661439976151b3a87a658690beb3d08b2f8a88 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Fri, 2 Oct 2015 09:07:15 +0300 Subject: [PATCH 099/131] refactor COPY, fix `COPY lib lib/`, add more tests @dimsmol --- src/rocker/build/copy.go | 49 +++++++++---- src/rocker/build/copy_test.go | 132 ++++++++++++++++++++++++++++++++-- 2 files changed, 162 insertions(+), 19 deletions(-) diff --git a/src/rocker/build/copy.go b/src/rocker/build/copy.go index 98aeb567..5897cd6f 100644 --- a/src/rocker/build/copy.go +++ b/src/rocker/build/copy.go @@ -75,6 +75,7 @@ func copyFiles(b *Build, args []string, cmdName string) (s State, err error) { if !filepath.IsAbs(dest) { dest = filepath.Join(s.Config.WorkingDir, dest) + // Add the leading slash back if we had it before if hasLeadingSlash { dest += string(os.PathSeparator) } @@ -160,18 +161,41 @@ func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) ( return u, nil } - // If we transfer a single item and the destination is not a directory (no leading slash) - if !strings.HasSuffix(u.dest, sep) && len(includes) == 1 { - item := 
filepath.Clean(includes[0]) - // If we've got a single file that was explicitly pointed in the source item - // we need to replace its name with the destination - // e.g. COPY src/foo.txt /app/bar.txt - if len(u.files) == 1 && filepath.Clean(u.files[0].relDest) == item { + // If we transfer a single item + if len(includes) == 1 { + var ( + item = filepath.Clean(includes[0]) + itemPath = filepath.Join(srcPath, item) + hasLeadingSlash = strings.HasSuffix(u.dest, sep) + hasWildcards = containsWildcards(item) + itemIsDir = false + addSep = false + stripDir = false + ) + + if stat, err := os.Stat(itemPath); err == nil && stat.IsDir() { + itemIsDir = true + } + + // The destination is not a directory (no leading slash) add it to the end + if !hasLeadingSlash { + addSep = true + } + + // If the item copied is a directory, we have to strip its name + // e.g. COPY asd[/1,2] /lib --> /lib[/1,2] but not /lib/asd[/1,2] + if itemIsDir { + stripDir = true + } else if !hasWildcards && !hasLeadingSlash { + // If we've got a single file that was explicitly pointed in the source item + // we need to replace its name with the destination + // e.g. COPY src/foo.txt /app/bar.txt u.files[0].dest = strings.TrimLeft(u.dest, sep) u.dest = "" - } else if !containsWildcards(item) { - // The source item is a directory but not a wildcard, so we need to rename only - // the first bit e.g. COPY foo /src + addSep = false + } + + if stripDir { for i := range u.files { relDest, err := filepath.Rel(item, u.files[i].dest) if err != nil { @@ -179,8 +203,9 @@ func makeTarStream(srcPath, dest, cmdName string, includes, excludes []string) ( } u.files[i].dest = relDest } - u.dest += sep - } else { + } + + if addSep { u.dest += sep } } diff --git a/src/rocker/build/copy_test.go b/src/rocker/build/copy_test.go index 8e932026..1158a8dd 100644 --- a/src/rocker/build/copy_test.go +++ b/src/rocker/build/copy_test.go @@ -566,8 +566,7 @@ func TestCopy_MakeTarStream_DirRenameLeadingSlash(t *testing.T) { }) defer os.RemoveAll(tmpDir) - // ADD ./c /src --> /src - // ADD ./a/b[/1,2] /src -> /src[/1,2] + // ADD ./c/ /src --> /src includes := []string{ "./c/", @@ -595,6 +594,96 @@ func TestCopy_MakeTarStream_DirRenameLeadingSlash(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +func TestCopy_MakeTarStream_SingleFileToDir(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "foo.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + assertions := [][3]string{ + {"foo.txt", "foo", "foo"}, + {"foo.txt", "foo/", "foo/foo.txt"}, + } + + for _, a := range assertions { + includes := []string{a[0]} + excludes := []string{} + dest := a[1] + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{a[2]}, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content for COPY %s %s", a[0], a[1]) + } +} + +// TODO: +// WORKDIR /app +// COPY lib lib/ +// should copy to /app/lib + +func TestCopy_MakeTarStream_DirRenameDestLeadingSlash(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "lib/foo.txt": "hello", + "lib/x/1.txt": "hello", + "lib/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + // WORKDIR /app + // COPY lib lib + // COPY lib lib/ + // COPY lib/ lib + // COPY lib/ lib/ + // /app + // /app/lib + // 
/app/lib/foo.txt + // /app/lib/x/1.txt + // /app/lib/x/2.txt + + assertions := [][2]string{ + {"lib", "lib"}, + {"lib", "lib/"}, + {"lib/", "lib"}, + {"lib/", "lib/"}, + } + + for _, a := range assertions { + includes := []string{a[0]} + excludes := []string{} + dest := a[1] + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "lib/foo.txt", + "lib/x/1.txt", + "lib/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content for COPY %s %s", a[0], a[1]) + } +} + func TestCopy_MakeTarStream_DirRenameWildcard(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "c/foo.txt": "hello", @@ -632,6 +721,40 @@ func TestCopy_MakeTarStream_DirRenameWildcard(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +func TestCopy_MakeTarStream_SubDirRenameWildcard(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "a/c/foo.txt": "hello", + "a/c/x/1.txt": "hello", + "a/c/x/2.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a/*", + } + excludes := []string{} + dest := "/src" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "src/c/foo.txt", + "src/c/x/1.txt", + "src/c/x/2.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "c/foo.txt": "hello", @@ -665,11 +788,6 @@ func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } -// TODO: -// WORKDIR /app -// COPY lib lib/ -// should copy to /app/lib - // helper functions func makeTmpDir(t *testing.T, files map[string]string) string { From e8d7831abc4cf71e276d1b1934e2d8b04a291eb6 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 6 Oct 2015 16:00:34 +0300 Subject: [PATCH 100/131] cached message --- src/rocker/build/build.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index 06866656..437632e8 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -186,7 +186,7 @@ func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) { log.WithFields(log.Fields{ "size": size, - }).Infof(color.New(color.FgGreen).SprintfFunc()("| Take image %.12s from cache", s2.ImageID)) + }).Infof(color.New(color.FgGreen).SprintfFunc()("| Cached! 
Take image %.12s", s2.ImageID)) // Store some stuff to the build b.ProducedSize += img.Size From 4784e6d9d8093af98367586a10078c738b283f67 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 6 Oct 2015 16:00:51 +0300 Subject: [PATCH 101/131] context dir also for stdin rockerfile & debug it --- src/cmd/rocker/main.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 0666ef28..d425cda0 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -235,15 +235,18 @@ func buildCommand(c *cli.Context) { // Initialize context dir contextDir = filepath.Dir(configFilename) - args := c.Args() - if len(args) > 0 { - contextDir = args[0] - if !filepath.IsAbs(contextDir) { - contextDir = filepath.Join(wd, args[0]) - } + } + + args := c.Args() + if len(args) > 0 { + contextDir = args[0] + if !filepath.IsAbs(contextDir) { + contextDir = filepath.Join(wd, args[0]) } } + log.Debugf("Context directory: %s", contextDir) + if c.Bool("print") { fmt.Print(rockerfile.Content) os.Exit(0) From 24da674773ebaaecde707a44c8858398a43791a9 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 6 Oct 2015 16:26:36 +0300 Subject: [PATCH 102/131] move HostConfig to NoCache in state --- src/rocker/build/client.go | 2 +- src/rocker/build/commands.go | 16 ++++++++-------- src/rocker/build/commands_test.go | 4 ++-- src/rocker/build/state.go | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 837b412c..e66fffd2 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -141,7 +141,7 @@ func (c *DockerClient) CreateContainer(s State) (string, error) { opts := docker.CreateContainerOptions{ Config: &s.Config, - HostConfig: &s.HostConfig, + HostConfig: &s.NoCache.HostConfig, } log.Debugf("Create container: %# v", pretty.Formatter(opts)) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 6749c00b..e0d7427d 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -1072,11 +1072,11 @@ func (c *CommandMount) Execute(b *Build) (s State, err error) { return s, err } - if s.HostConfig.Binds == nil { - s.HostConfig.Binds = []string{} + if s.NoCache.HostConfig.Binds == nil { + s.NoCache.HostConfig.Binds = []string{} } - s.HostConfig.Binds = append(s.HostConfig.Binds, src+":"+dest) + s.NoCache.HostConfig.Binds = append(s.NoCache.HostConfig.Binds, src+":"+dest) commitIds = append(commitIds, arg) // MOUNT dir @@ -1086,11 +1086,11 @@ func (c *CommandMount) Execute(b *Build) (s State, err error) { return s, err } - if s.HostConfig.VolumesFrom == nil { - s.HostConfig.VolumesFrom = []string{} + if s.NoCache.HostConfig.VolumesFrom == nil { + s.NoCache.HostConfig.VolumesFrom = []string{} } - s.HostConfig.VolumesFrom = append(s.HostConfig.VolumesFrom, name) + s.NoCache.HostConfig.VolumesFrom = append(s.NoCache.HostConfig.VolumesFrom, name) commitIds = append(commitIds, name+":"+arg) } } @@ -1173,7 +1173,7 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { }() // Append exports container as a volume - s.HostConfig.VolumesFrom = []string{exportsContainerID} + s.NoCache.HostConfig.VolumesFrom = []string{exportsContainerID} cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} @@ -1285,7 +1285,7 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - s.HostConfig.VolumesFrom = []string{b.exportsContainerName()} + 
s.NoCache.HostConfig.VolumesFrom = []string{b.exportsContainerName()} if importID, err = b.client.CreateContainer(s); err != nil { return s, err diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index 0315d3a6..0ae7543a 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -706,7 +706,7 @@ func TestCommandMount_Simple(t *testing.T) { } c.AssertExpectations(t) - assert.Equal(t, []string{"/resolved/src:/dest"}, state.HostConfig.Binds) + assert.Equal(t, []string{"/resolved/src:/dest"}, state.NoCache.HostConfig.Binds) assert.Equal(t, `MOUNT ["/src:/dest"]`, state.GetCommits()) } @@ -735,7 +735,7 @@ func TestCommandMount_VolumeContainer(t *testing.T) { commitMsg := fmt.Sprintf("MOUNT [\"%s:/cache\"]", containerName) c.AssertExpectations(t) - assert.Equal(t, []string{containerName}, state.HostConfig.VolumesFrom) + assert.Equal(t, []string{containerName}, state.NoCache.HostConfig.VolumesFrom) assert.Equal(t, commitMsg, state.GetCommits()) } diff --git a/src/rocker/build/state.go b/src/rocker/build/state.go index da2ba288..3fcf1085 100644 --- a/src/rocker/build/state.go +++ b/src/rocker/build/state.go @@ -28,7 +28,6 @@ import ( // TODO: document type State struct { Config docker.Config - HostConfig docker.HostConfig // TODO: move to NoCache? ImageID string ParentID string ExportsID string @@ -46,6 +45,7 @@ type StateNoCache struct { CacheBusted bool CmdSet bool ContainerID string + HostConfig docker.HostConfig } // NewState makes a fresh state From 173968d03d3e4104d97d2da28e7d689c1f6d41dd Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 7 Oct 2015 15:55:41 +0300 Subject: [PATCH 103/131] Fix RUN if ENTRYPOINT specified --- src/rocker/build/commands.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index e0d7427d..d056b9ad 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -393,7 +393,9 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { // We run this command in the container using CMD origCmd := s.Config.Cmd + origEntrypoint := s.Config.Entrypoint s.Config.Cmd = cmd + s.Config.Entrypoint = []string{} if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil { return s, err @@ -406,6 +408,7 @@ func (c *CommandRun) Execute(b *Build) (s State, err error) { // Restore command after commit s.Config.Cmd = origCmd + s.Config.Entrypoint = origEntrypoint return s, nil } From 80510375c825db6731268f9f4f247a404879c966 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 7 Oct 2015 16:45:55 +0300 Subject: [PATCH 104/131] ping docker server before running the build --- src/cmd/rocker/main.go | 5 +++++ src/rocker/dockerclient/dockerclient.go | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index d425cda0..eef50053 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -306,6 +306,11 @@ func buildCommand(c *cli.Context) { log.Fatal(err) } + // Check the docker connection before we actually run + if err := dockerclient.Ping(dockerClient, 5000); err != nil { + log.Fatal(err) + } + if err := builder.Run(plan); err != nil { log.Fatal(err) } diff --git a/src/rocker/dockerclient/dockerclient.go b/src/rocker/dockerclient/dockerclient.go index 20e8a20f..7077c897 100644 --- a/src/rocker/dockerclient/dockerclient.go +++ b/src/rocker/dockerclient/dockerclient.go @@ -25,6 +25,7 @@ import ( "os" "strconv" "strings" + "time" 
"github.com/codegangsta/cli" "github.com/fsouza/go-dockerclient" @@ -95,6 +96,26 @@ func NewFromCli(c *cli.Context) (*docker.Client, error) { return NewFromConfig(NewConfigFromCli(c)) } +// Ping pings docker client but with timeout +// The problem is that for some reason it's impossible to set the +// default timeout for the go-dockerclient Dialer, need to investigate +func Ping(client *docker.Client, timeoutMs int) error { + var ( + chErr = make(chan error) + timeout = time.Duration(timeoutMs) * time.Millisecond + ) + go func() { + chErr <- client.Ping() + }() + select { + case err := <-chErr: + return err + case <-time.After(timeout): + // TODO: can we kill the ping goroutine? + return fmt.Errorf("Failed to reach docker server, timeout %s", timeout) + } +} + // GlobalCliParams returns global params that configures docker client connection func GlobalCliParams() []cli.Flag { return []cli.Flag{ From 67030e60e0cc006f2c4ab2d0d522fded07ce6a53 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 13 Oct 2015 10:21:23 +0300 Subject: [PATCH 105/131] vendor docker's build shellparser --- src/rocker/shellparser/LICENSE | 191 ++++++++++++++++ src/rocker/shellparser/shellparser.go | 247 +++++++++++++++++++++ src/rocker/shellparser/shellparser_test.go | 55 +++++ src/rocker/shellparser/testdata/words | 58 +++++ 4 files changed, 551 insertions(+) create mode 100644 src/rocker/shellparser/LICENSE create mode 100644 src/rocker/shellparser/shellparser.go create mode 100644 src/rocker/shellparser/shellparser_test.go create mode 100644 src/rocker/shellparser/testdata/words diff --git a/src/rocker/shellparser/LICENSE b/src/rocker/shellparser/LICENSE new file mode 100644 index 00000000..c7a3f0cf --- /dev/null +++ b/src/rocker/shellparser/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/rocker/shellparser/shellparser.go b/src/rocker/shellparser/shellparser.go new file mode 100644 index 00000000..0b7ccf20 --- /dev/null +++ b/src/rocker/shellparser/shellparser.go @@ -0,0 +1,247 @@ +// NOTICE: it was originally grabbed from the docker source +// without modifications; see LICENSE in the current +// directory from the license and the copyright. + +package shellparser + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "unicode" +) + +type shellWord struct { + word string + envs []string + pos int +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. 
+func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + return sw.process() +} + +func (sw *shellWord) process() (string, error) { + return sw.processStopOn('\000') +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, error) { + var result string + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.pos < len(sw.word) { + ch := sw.peek() + if stopChar != '\000' && ch == stopChar { + sw.next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", err + } + result += tmp + } else { + // Not special, just add it to the result + ch = sw.next() + if ch == '\\' { + // '\' escapes, except end of line + ch = sw.next() + if ch == '\000' { + continue + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) peek() rune { + if sw.pos == len(sw.word) { + return '\000' + } + return rune(sw.word[sw.pos]) +} + +func (sw *shellWord) next() rune { + if sw.pos == len(sw.word) { + return '\000' + } + ch := rune(sw.word[sw.pos]) + sw.pos++ + return ch +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.next() + + for { + ch := sw.next() + if ch == '\000' || ch == '\'' { + break + } + result += string(ch) + } + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ + var result string + + sw.next() + + for sw.pos < len(sw.word) { + ch := sw.peek() + if ch == '"' { + sw.next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.next() + if ch == '\\' { + chNext := sw.peek() + + if chNext == '\000' { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.next() + ch := sw.peek() + if ch == '{' { + sw.next() + name := sw.processName() + ch = sw.peek() + if ch == '}' { + // Normal ${xx} case + sw.next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... 
spot + + sw.next() // skip over : + modifier := sw.next() + + word, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.pos < len(sw.word) { + ch := sw.peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/src/rocker/shellparser/shellparser_test.go b/src/rocker/shellparser/shellparser_test.go new file mode 100644 index 00000000..e2ce5064 --- /dev/null +++ b/src/rocker/shellparser/shellparser_test.go @@ -0,0 +1,55 @@ +// NOTICE: it was originally grabbed from the docker source +// without modifications; see LICENSE in the current +// directory from the license and the copyright. + +package shellparser + +import ( + "bufio" + "os" + "strings" + "testing" +) + +func TestShellParser(t *testing.T) { + file, err := os.Open("testdata/words") + if err != nil { + t.Fatalf("Can't open 'words': %s", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash"} + for scanner.Scan() { + line := scanner.Text() + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in 'words' - should be 2 words:%q", words) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + + newWord, err := ProcessWord(words[0], envs) + + if err != nil { + newWord = "error" + } + + if newWord != words[1] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) + } + } +} diff --git a/src/rocker/shellparser/testdata/words b/src/rocker/shellparser/testdata/words new file mode 100644 index 00000000..1114a7e4 --- /dev/null +++ b/src/rocker/shellparser/testdata/words @@ -0,0 +1,58 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. 
+he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +he${XXX:-000}xx | he000xx +he${PWD:-000}xx | he/homexx +he${XXX:-$PWD}xx | he/homexx +he${XXX:-${PWD:-yyy}}xx | he/homexx +he${XXX:-${YYY:-yyy}}xx | heyyyxx +he${XXX:YYY} | error +he${XXX:+${PWD}}xx | hexx +he${PWD:+${XXX}}xx | hexx +he${PWD:+${SHELL}}xx | hebashxx +he${XXX:+000}xx | hexx +he${PWD:+000}xx | he000xx +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error +he${PWD:=000}xx | error +he${PWD:+${PWD}:}xx | he/home:xx +he${XXX:-\$PWD:}xx | he$PWD:xx +he${XXX:-\${PWD}z}xx | he${PWDz}xx From 190726d7f590bef1c3970e1c9e1f19efc8046344 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 13 Oct 2015 10:22:46 +0300 Subject: [PATCH 106/131] fix #42 replace actual env variables for some commands --- src/rocker/build/build.go | 5 ++++ src/rocker/build/commands.go | 58 ++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index 437632e8..cb0d5ca9 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -105,6 +105,11 @@ func (b *Build) Run(plan Plan) (err error) { continue } + // Replace env for the command if appropriate + if c, ok := c.(EnvReplacableCommand); ok { + c.ReplaceEnv(b.state.Config.Env) + } + log.Infof("%s", color.New(color.FgWhite, color.Bold).SprintFunc()(c)) if b.state, err = c.Execute(b); err != nil { diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index d056b9ad..aa795884 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -24,6 +24,7 @@ import ( "path/filepath" "regexp" "rocker/imagename" + "rocker/shellparser" "rocker/util" "sort" "strings" @@ -60,6 +61,12 @@ type Command interface { String() string } +// EnvReplacableCommand interface describes the command that can replace ENV +// variables into arguments of itself +type EnvReplacableCommand interface { + ReplaceEnv(env []string) error +} + // NewCommand make a new command according to the configuration given func NewCommand(cfg ConfigCommand) (cmd Command, err error) { // TODO: use reflection? 
@@ -498,6 +505,11 @@ func (c *CommandEnv) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandEnv) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandEnv) Execute(b *Build) (s State, err error) { @@ -555,6 +567,11 @@ func (c *CommandLabel) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandLabel) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandLabel) Execute(b *Build) (s State, err error) { @@ -606,6 +623,11 @@ func (c *CommandWorkdir) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandWorkdir) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandWorkdir) Execute(b *Build) (s State, err error) { @@ -725,6 +747,11 @@ func (c *CommandExpose) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandExpose) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandExpose) Execute(b *Build) (s State, err error) { @@ -779,6 +806,11 @@ func (c *CommandVolume) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandVolume) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandVolume) Execute(b *Build) (s State, err error) { @@ -819,6 +851,11 @@ func (c *CommandUser) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandUser) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandUser) Execute(b *Build) (s State, err error) { @@ -993,6 +1030,11 @@ func (c *CommandCopy) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandCopy) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandCopy) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { @@ -1017,6 +1059,11 @@ func (c *CommandAdd) ShouldRun(b *Build) (bool, error) { return true, nil } +// ReplaceEnv implements EnvReplacableCommand interface +func (c *CommandAdd) ReplaceEnv(env []string) error { + return replaceEnv(c.cfg.args, env) +} + // Execute runs the command func (c *CommandAdd) Execute(b *Build) (State, error) { if len(c.cfg.args) < 2 { @@ -1325,3 +1372,14 @@ func (c *CommandOnbuildWrap) ShouldRun(b *Build) (bool, error) { func (c *CommandOnbuildWrap) Execute(b *Build) (State, error) { return c.cmd.Execute(b) } + +////////// Private stuff ////////// + +func replaceEnv(args []string, env []string) (err error) { + for i, v := range args { + if args[i], err = shellparser.ProcessWord(v, env); err != nil { + return err + } + } + return nil +} From 63c29a8d98581c1914d8b335fac694e9048fe034 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 13 Oct 2015 10:34:08 +0300 Subject: [PATCH 107/131] #42 make test for env vars substitution --- src/rocker/build/build_test.go | 36 +++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git 
a/src/rocker/build/build_test.go b/src/rocker/build/build_test.go index 9c02eb71..a3d2d125 100644 --- a/src/rocker/build/build_test.go +++ b/src/rocker/build/build_test.go @@ -28,11 +28,41 @@ import ( "github.com/stretchr/testify/mock" ) -func TestNewBuild(t *testing.T) { +func TestBuild_NewBuild(t *testing.T) { b, _ := makeBuild(t, "FROM ubuntu", Config{}) assert.IsType(t, &Rockerfile{}, b.rockerfile) } +func TestBuild_ReplaceEnvVars(t *testing.T) { + rockerfile := "FROM ubuntu\nENV PATH=$PATH:/cassandra/bin" + b, c := makeBuild(t, rockerfile, Config{}) + plan := makePlan(t, rockerfile) + + img := &docker.Image{ + ID: "123", + Config: &docker.Config{ + Env: []string{"PATH=/usr/bin"}, + }, + } + + resultImage := &docker.Image{ID: "789"} + + c.On("InspectImage", "ubuntu").Return(img, nil).Once() + + c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { + arg := args.Get(0).(State) + assert.Equal(t, []string{"PATH=/usr/bin:/cassandra/bin"}, arg.Config.Env) + }).Once() + + c.On("CommitContainer", mock.AnythingOfType("State"), "ENV PATH=/usr/bin:/cassandra/bin").Return(resultImage, nil).Once() + + c.On("RemoveContainer", "456").Return(nil).Once() + + if err := b.Run(plan); err != nil { + t.Fatal(err) + } +} + // internal helpers func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *MockClient) { @@ -44,12 +74,16 @@ func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *Moc t.Fatal(err) } + cfg.NoCache = true + c := &MockClient{} b := New(c, r, nil, cfg) return b, c } +// Docker client mock + type MockClient struct { mock.Mock } From aeed33b54d3970051d4f2bf7a4a7dfcfffca1763 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 17:48:25 +0300 Subject: [PATCH 108/131] #44 fix imagename yaml marshalling --- src/rocker/imagename/imagename.go | 5 ++--- src/rocker/imagename/imagename_test.go | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/rocker/imagename/imagename.go b/src/rocker/imagename/imagename.go index 3f6f2c7f..0e9d9cd6 100644 --- a/src/rocker/imagename/imagename.go +++ b/src/rocker/imagename/imagename.go @@ -24,7 +24,6 @@ import ( "sort" "strings" - "github.com/go-yaml/yaml" "github.com/wmark/semver" ) @@ -287,8 +286,8 @@ func (img *ImageName) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML serializes ImageName to YAML string -func (img ImageName) MarshalYAML() ([]byte, error) { - return yaml.Marshal(img.String()) +func (img ImageName) MarshalYAML() (interface{}, error) { + return img.String(), nil } // Tags is a structure used for cleaning images diff --git a/src/rocker/imagename/imagename_test.go b/src/rocker/imagename/imagename_test.go index e3374e6c..f9e8cf66 100644 --- a/src/rocker/imagename/imagename_test.go +++ b/src/rocker/imagename/imagename_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/go-yaml/yaml" "github.com/kr/pretty" "github.com/stretchr/testify/assert" @@ -476,3 +477,18 @@ func TestTagsGetOld(t *testing.T) { assert.Equal(t, "hub/ns/name:2", old[1].String(), "bad old image 2") assert.Equal(t, "hub/ns/name:1", old[2].String(), "bad old image 3") } + +func TestImagename_ToYaml(t *testing.T) { + value := struct { + Name *ImageName + }{ + NewFromString("hub/ns/name:1"), + } + + data, err := yaml.Marshal(value) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "name: hub/ns/name:1\n", string(data)) +} From f7652b632f3f4fccd3edc95bb413d5be8a69d10d Mon Sep 17 00:00:00 2001 From: 
Yuriy Bogdanov Date: Tue, 20 Oct 2015 17:50:00 +0300 Subject: [PATCH 109/131] #44 use an artifact data structure instead of string --- src/rocker/build/commands.go | 34 +++++++++++------- src/rocker/imagename/artifact.go | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 13 deletions(-) create mode 100644 src/rocker/imagename/artifact.go diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index aa795884..5ec1554c 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -28,11 +28,13 @@ import ( "rocker/util" "sort" "strings" + "time" log "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/nat" "github.com/docker/docker/pkg/units" "github.com/fsouza/go-dockerclient" + "github.com/go-yaml/yaml" "github.com/kr/pretty" ) @@ -974,11 +976,12 @@ func (c *CommandPush) Execute(b *Build) (State, error) { } image := imagename.NewFromString(c.cfg.args[0]) - artifactProps := []string{ - fmt.Sprintf("Name: %s", image), - fmt.Sprintf("Pushed: %t", b.cfg.Push), - fmt.Sprintf("Tag: %s", image.GetTag()), - fmt.Sprintf("ImageID: %s", b.state.ImageID), + artifact := imagename.Artifact{ + Name: image, + Pushed: b.cfg.Push, + Tag: image.GetTag(), + ImageID: b.state.ImageID, + BuildTime: time.Now(), } // push image and add some lines to artifacts @@ -987,11 +990,8 @@ func (c *CommandPush) Execute(b *Build) (State, error) { if err != nil { return b.state, err } - artifactProps = append( - artifactProps, - fmt.Sprintf("Digest: %s", digest), - fmt.Sprintf("Addressable: %s@%s", image.NameWithRegistry(), digest), - ) + artifact.Digest = digest + artifact.Addressable = fmt.Sprintf("%s@%s", image.NameWithRegistry(), digest) } else { log.Infof("| Don't push. Pass --push flag to actually push to the registry") } @@ -1001,15 +1001,23 @@ func (c *CommandPush) Execute(b *Build) (State, error) { if err := os.MkdirAll(b.cfg.ArtifactsPath, 0755); err != nil { return b.state, fmt.Errorf("Failed to create directory %s for the artifacts, error: %s", b.cfg.ArtifactsPath, err) } - filePath := filepath.Join(b.cfg.ArtifactsPath, image.GetTag()) - content := []byte(strings.Join(artifactProps, "\n") + "\n") + filePath := filepath.Join(b.cfg.ArtifactsPath, artifact.GetFileName()) + + artifacts := imagename.Artifacts{ + []imagename.Artifact{artifact}, + } + content, err := yaml.Marshal(artifacts) + if err != nil { + return b.state, err + } if err := ioutil.WriteFile(filePath, content, 0644); err != nil { return b.state, fmt.Errorf("Failed to write artifact file %s, error: %s", filePath, err) } + log.Infof("| Saved artifact file %s", filePath) - log.Debugf("Artifact properties: %# v", pretty.Formatter(artifactProps)) + log.Debugf("Artifact properties: %# v", pretty.Formatter(artifact)) } return b.state, nil diff --git a/src/rocker/imagename/artifact.go b/src/rocker/imagename/artifact.go new file mode 100644 index 00000000..7b1dd732 --- /dev/null +++ b/src/rocker/imagename/artifact.go @@ -0,0 +1,62 @@ +/*- + * Copyright 2015 Grammarly, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package imagename + +import ( + "fmt" + "strings" + + "time" +) + +// Artifact represents the artifact that is the result of image build +// It holds information about the pushed image and may be saved as a file +type Artifact struct { + Name *ImageName `yaml:"Name"` + Pushed bool `yaml:"Pushed"` + Tag string `yaml:"Tag"` + Digest string `yaml:"Digest"` + ImageID string `yaml:"ImageID"` + Addressable string `yaml:"Addressable"` + BuildTime time.Time `yaml:"BuildTime"` +} + +// Artifacts is a collection of Artifact entities +type Artifacts struct { + RockerArtifacts []Artifact `yaml:"RockerArtifacts"` +} + +// GetFileName constructs the base file name out of the image info +func (a *Artifact) GetFileName() string { + imageName := strings.Replace(a.Name.Name, "/", "_", -1) + return fmt.Sprintf("%s_%s.yml", imageName, a.Name.GetTag()) +} + +// Len returns the length of image tags +func (a *Artifacts) Len() int { + return len(a.RockerArtifacts) +} + +// Less returns true if item by index[i] is created after of item[j] +func (a *Artifacts) Less(i, j int) bool { + return a.RockerArtifacts[i].Name.Tag > a.RockerArtifacts[j].Name.Tag +} + +// Swap swaps items by indices [i] and [j] +func (a *Artifacts) Swap(i, j int) { + a.RockerArtifacts[i], a.RockerArtifacts[j] = a.RockerArtifacts[j], a.RockerArtifacts[i] +} From 6e258e40239e19e484ce9a4df5e6c1ff11d6259c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 17:51:42 +0300 Subject: [PATCH 110/131] #45 rocker/template: `image` helper that can read artifacts --- src/cmd/rocker/main.go | 21 +++++- src/rocker/template/README.md | 34 ++++++++++ src/rocker/template/template.go | 69 ++++++++++++++++++++ src/rocker/template/template_test.go | 98 ++++++++++++++++++++++++++++ src/rocker/template/vars.go | 80 +++++++++++++++++++++-- src/rocker/template/vars_test.go | 37 +++++++++++ 6 files changed, 332 insertions(+), 7 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index eef50053..8fb871d9 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -98,6 +98,11 @@ func main() { Value: &cli.StringSlice{}, Usage: "set variables to pass to build tasks, value is like \"key=value\"", }, + cli.StringSliceFlag{ + Name: "vars", + Value: &cli.StringSlice{}, + Usage: "Load variables form a file, either JSON or YAML. 
Can pass multiple of this.", + }, cli.BoolFlag{ Name: "no-cache", Usage: "supresses cache for docker builds", @@ -135,6 +140,10 @@ func main() { Name: "print", Usage: "just print the Rockerfile after template processing and stop", }, + cli.BoolFlag{ + Name: "demand-artifacts", + Usage: "fail if artifacts not found for {{ image }} helpers", + }, cli.StringFlag{ Name: "id", Usage: "override the default id generation strategy for current build", @@ -184,12 +193,22 @@ func buildCommand(c *cli.Context) { initLogs(c) + vars, err := template.VarsFromFileMulti(c.StringSlice("vars")) + if err != nil { + log.Fatal(err) + os.Exit(1) + } + cliVars, err := template.VarsFromStrings(c.StringSlice("var")) if err != nil { log.Fatal(err) } - vars := template.Vars{}.Merge(cliVars) + vars = vars.Merge(cliVars) + + if c.Bool("demand-artifacts") { + vars["DemandArtifacts"] = true + } // obtain git info about current directory // gitInfo, err := git.Info(filepath.Dir(configFilename)) diff --git a/src/rocker/template/README.md b/src/rocker/template/README.md index 63824297..f7cf4bf3 100644 --- a/src/rocker/template/README.md +++ b/src/rocker/template/README.md @@ -153,6 +153,40 @@ If the `Version` variable is not given, then template processing will fail with Error executing template TEMPLATE_NAME, error: template: TEMPLATE_NAME:1:3: executing \"TEMPLATE_NAME\" at : error calling assert: Assertion failed ``` +### {{ image *docker_image_name_with_tag* }} or {{ image *docker_image_name* *tag* }} +Wrapper that is used to substitute images of particular versions derived by artifacts *(TODO: link to artifacts doc)*. + +Example: +```Dockerfile +FROM {{ image "ubuntu" }} +# OR +FROM {{ image "ubuntu:latest" }} +# OR +FROM {{ image "ubuntu" "latest" }} +``` + +Without any additional arguments it will resolve into this: +```Dockerfile +FROM ubuntu:latest +``` + +But if you have an artifact that is resulted by a previous rocker build, that can be fed back to rocker as variable, the artifact will be substituted: +```yaml +# shorten version of an artifact by rocker +RockerArtifacts: +- Name: ubuntu:latest + Digest: sha256:ead434cd278824865d6e3b67e5d4579ded02eb2e8367fc165efa21138b225f11 +``` + +```Dockerfile +# rocker build -vars artifacts/* +FROM ubuntu@sha256:ead434cd278824865d6e3b67e5d4579ded02eb2e8367fc165efa21138b225f11 +``` + +This feature is useful when you have a continuous integration pipeline and you want to build images on top of each other with guaranteed immutability. Also, this trick can be used with [rocker-compose](https://github.com/grammarly/rocker-compose) to run images of particular versions devired by the artifacts. + +*TODO: also describe semver matching behavior* + # Variables `rocker/template` automatically populates [os.Environ](https://golang.org/pkg/os/#Environ) to the template along with the variables that are passed from the outside. All environment variables are available under `.Env`. 
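The template.go diff below adds the `image` helper itself. As a minimal sketch of the semver matching behavior (left as a TODO above and exercised by the template tests later in this patch), assume an artifact file like the following was produced by a previous build; the file name here is illustrative only:

```yaml
# artifacts/alpine.yml (hypothetical file name)
RockerArtifacts:
- Name: alpine:3.2
  Tag: "3.2"
```

```Dockerfile
# rocker build -vars artifacts/*
FROM {{ image "alpine:3.*" }}
# resolves to: FROM alpine:3.2
# if the artifact carried a Digest, the digest would be substituted instead
```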
diff --git a/src/rocker/template/template.go b/src/rocker/template/template.go index 1365bb01..9478d81d 100644 --- a/src/rocker/template/template.go +++ b/src/rocker/template/template.go @@ -24,12 +24,16 @@ import ( "io/ioutil" "os" "reflect" + "rocker/imagename" + "sort" "strconv" "strings" "text/template" "github.com/go-yaml/yaml" "github.com/kr/pretty" + + log "github.com/Sirupsen/logrus" ) // Funs is the list of additional helpers that may be given to the template @@ -61,6 +65,7 @@ func Process(name string, reader io.Reader, vars Vars, funs Funs) (*bytes.Buffer "json": jsonFn, "shell": EscapeShellarg, "yaml": yamlFn, + "image": makeImageHelper(vars), // `image` helper needs to make a closure on Vars // strings functions "compare": strings.Compare, @@ -239,6 +244,70 @@ func indent(prefix, s string) string { return strings.Join(res, "\n") } +func makeImageHelper(vars Vars) func(string, ...string) (string, error) { + // Sort artifacts so we match semver on latest item + var ( + artifacts = &imagename.Artifacts{} + ok bool + ) + + if artifacts.RockerArtifacts, ok = vars["RockerArtifacts"].([]imagename.Artifact); !ok { + artifacts.RockerArtifacts = []imagename.Artifact{} + } + + sort.Sort(artifacts) + + log.Debugf("`image` helper got artifacts: %# v", pretty.Formatter(artifacts)) + + return func(img string, args ...string) (string, error) { + var ( + matched bool + ok bool + shouldMatch bool + image = imagename.NewFromString(img) + ) + + if len(args) > 0 { + image = imagename.New(img, args[0]) + } + + for _, a := range artifacts.RockerArtifacts { + if !image.IsSameKind(*a.Name) { + continue + } + + if image.HasVersionRange() { + if !image.Contains(a.Name) { + log.Debugf("Skipping artifact %s because it is not suitable for %s", a.Name, image) + continue + } + } else if image.GetTag() != a.Name.GetTag() { + log.Debugf("Skipping artifact %s because it is not suitable for %s", a.Name, image) + continue + } + + if a.Digest != "" { + log.Debugf("Apply digest %s for image %s", a.Digest, image) + image.SetTag(a.Digest) + matched = true + break + } + if a.Name.HasTag() { + log.Debugf("Apply tag %s for image %s", a.Name.GetTag(), image) + image.SetTag(a.Name.GetTag()) + matched = true + break + } + } + + if shouldMatch, ok = vars["DemandArtifacts"].(bool); ok && shouldMatch && !matched { + return "", fmt.Errorf("Cannot find suitable artifact for image %s", image) + } + + return image.String(), nil + } +} + func interfaceToInt(v interface{}) (int, error) { switch v.(type) { case int: diff --git a/src/rocker/template/template_test.go b/src/rocker/template/template_test.go index 837dfc0f..5d563a53 100644 --- a/src/rocker/template/template_test.go +++ b/src/rocker/template/template_test.go @@ -19,6 +19,7 @@ package template import ( "fmt" "os" + "rocker/imagename" "strings" "testing" @@ -32,6 +33,27 @@ var ( "data": map[string]string{ "foo": "bar", }, + "RockerArtifacts": []imagename.Artifact{ + imagename.Artifact{ + Name: imagename.NewFromString("alpine:3.2"), + Tag: "3.2", + }, + imagename.Artifact{ + Name: imagename.NewFromString("golang:1.5"), + Tag: "1.5", + Digest: "sha256:ead434", + }, + imagename.Artifact{ + Name: imagename.NewFromString("data:master"), + Tag: "master", + Digest: "sha256:fafe14", + }, + imagename.Artifact{ + Name: imagename.NewFromString("ssh:latest"), + Tag: "latest", + Digest: "sha256:ba41cd", + }, + }, } ) @@ -120,6 +142,77 @@ func TestProcess_YamlIndent(t *testing.T) { assert.Equal(t, "key:\n foo: bar\n", processTemplate(t, "key:\n{{ .data | yaml 1 }}")) } +func 
TestProcess_Image_Simple(t *testing.T) { + tests := []struct { + tpl string + result string + message string + }{ + {"{{ image `debian:7.7` }}", "debian:7.7", "should not alter the tag that is not in artifacts"}, + {"{{ image `debian` `7.7` }}", "debian:7.7", "should be possible to specify tag as a separate argument"}, + {"{{ image `debian` `sha256:afa` }}", "debian@sha256:afa", "should be possible to specify digest as a separate argument"}, + } + + for _, test := range tests { + assert.Equal(t, test.result, processTemplate(t, test.tpl), test.message) + } +} + +func TestProcess_Image_Advanced(t *testing.T) { + tests := []struct { + in string + result string + shouldMatch bool + message string + }{ + {"debian:7.7", "debian:7.7", false, "should not alter the tag that is not in artifacts"}, + {"debian:7.*", "debian:7.*", false, "should not alter the semver tag that is not in artifacts"}, + {"debian", "debian:latest", false, "should not match anything when no tag given (:latest) and no artifact"}, + {"alpine:3.1", "alpine:3.1", false, "should not match artifact with different version"}, + {"alpine:4.1", "alpine:4.1", false, "should not match artifact with different version"}, + {"alpine:3.*", "alpine:3.2", true, "should match artifact with version wildcard"}, + {"alpine", "alpine:latest", false, "should not match artifact when no tag given (:latest by default)"}, + {"alpine:latest", "alpine:latest", false, "should not match on a :latest tag"}, + {"alpine:snapshot", "alpine:snapshot", false, "should not match on a named tag"}, + {"golang:1.5", "golang@sha256:ead434", true, "should match semver tag and use digest"}, + {"golang:1.*", "golang@sha256:ead434", true, "should match on wildcard semver tag and use digest"}, + {"golang:1", "golang@sha256:ead434", true, "should match on prefix semver tag and use digest"}, + {"golang:1.4", "golang:1.4", false, "should not match on different semver tag"}, + {"golang:master", "golang:master", false, "should not match on a named tag"}, + {"data:1.2", "data:1.2", false, "should not match on a version tag against named artifact"}, + {"data:snapshot", "data:snapshot", false, "should not match on a different named tag against named artifact"}, + {"data:master", "data@sha256:fafe14", true, "should match on a same named tag against named artifact"}, + {"ssh:latest", "ssh@sha256:ba41cd", true, "should match on a :latest tag against :latest artifact"}, + {"ssh", "ssh@sha256:ba41cd", true, "should match on non-tagged tag against :latest artifact"}, + {"ssh:master", "ssh:master", false, "should match with other tag against :latest artifact"}, + {"ssh:1.2", "ssh:1.2", false, "should match with semver tag against :latest artifact"}, + } + + for _, test := range tests { + tpl := fmt.Sprintf("{{ image `%s` }}", test.in) + assert.Equal(t, test.result, processTemplate(t, tpl), test.message) + } + + // Now test the same but with DemandArtifact On + configTemplateVars["DemandArtifacts"] = true + defer func() { + configTemplateVars["DemandArtifacts"] = false + }() + + for _, test := range tests { + tpl := fmt.Sprintf("{{ image `%s` }}", test.in) + if test.shouldMatch { + assert.Equal(t, test.result, processTemplate(t, tpl), test.message) + } else { + err := processTemplateReturnError(t, tpl) + assert.Error(t, err, fmt.Sprintf("should give an error for test case: %s", test.message)) + if err != nil { + assert.Contains(t, err.Error(), fmt.Sprintf("Cannot find suitable artifact for image %s", test.in), test.message) + } + } + } +} + func processTemplate(t *testing.T, tpl string) 
string { result, err := Process("test", strings.NewReader(tpl), configTemplateVars, map[string]interface{}{}) if err != nil { @@ -127,3 +220,8 @@ func processTemplate(t *testing.T, tpl string) string { } return result.String() } + +func processTemplateReturnError(t *testing.T, tpl string) error { + _, err := Process("test", strings.NewReader(tpl), configTemplateVars, map[string]interface{}{}) + return err +} diff --git a/src/rocker/template/vars.go b/src/rocker/template/vars.go index 9231c47d..4f449c90 100644 --- a/src/rocker/template/vars.go +++ b/src/rocker/template/vars.go @@ -23,11 +23,15 @@ import ( "os" "path" "path/filepath" + "reflect" "regexp" + "rocker/imagename" "sort" "strings" "github.com/go-yaml/yaml" + + log "github.com/Sirupsen/logrus" ) // Vars describes the data structure of the build variables @@ -37,7 +41,16 @@ type Vars map[string]interface{} func (vars Vars) Merge(varsList ...Vars) Vars { for _, mergeWith := range varsList { for k, v := range mergeWith { - vars[k] = v + // We want to merge slices of the same type by appending them to each other + // instead of overwriting + rv1 := reflect.ValueOf(vars[k]) + rv2 := reflect.ValueOf(v) + + if rv1.Kind() == reflect.Slice && rv2.Kind() == reflect.Slice && rv1.Type() == rv2.Type() { + vars[k] = reflect.AppendSlice(rv1, rv2).Interface() + } else { + vars[k] = v + } } } return vars @@ -91,6 +104,29 @@ func (vars *Vars) UnmarshalJSON(data []byte) (err error) { return nil } +// UnmarshalYAML parses YAML string and returns Vars +func (vars *Vars) UnmarshalYAML(unmarshal func(interface{}) error) (err error) { + // try unmarshal RockerArtifacts type + var artifacts imagename.Artifacts + if err = unmarshal(&artifacts); err != nil { + return err + } + + var value map[string]interface{} + if err = unmarshal(&value); err != nil { + return err + } + + // Fill artifacts if present + if len(artifacts.RockerArtifacts) > 0 { + value["RockerArtifacts"] = artifacts.RockerArtifacts + } + + *vars = value + + return nil +} + // VarsFromStrings parses Vars through ParseKvPairs and then loads content from files // for vars values with "@" prefix func VarsFromStrings(pairs []string) (vars Vars, err error) { @@ -117,6 +153,7 @@ func VarsFromStrings(pairs []string) (vars Vars, err error) { // VarsFromFile reads variables from either JSON or YAML file func VarsFromFile(filename string) (vars Vars, err error) { + log.Debugf("Load vars from file %s", filename) if filename, err = resolveFileName(filename); err != nil { return nil, err @@ -144,13 +181,32 @@ func VarsFromFile(filename string) (vars Vars, err error) { } // VarsFromFileMulti reads multiple files and merge vars -func VarsFromFileMulti(files []string) (vars Vars, err error) { - varsList := make([]Vars, len(files)) - for i, f := range files { - if varsList[i], err = VarsFromFile(f); err != nil { - return nil, err +func VarsFromFileMulti(files []string) (Vars, error) { + var ( + varsList = []Vars{} + matches []string + vars Vars + err error + ) + + for _, pat := range files { + // TODO: error if file not found (when not using wildcards) + matches = []string{pat} + + if containsWildcards(pat) { + if matches, err = filepath.Glob(pat); err != nil { + return nil, err + } + } + + for _, f := range matches { + if vars, err = VarsFromFile(f); err != nil { + return nil, err + } + varsList = append(varsList, vars) } } + return Vars{}.Merge(varsList...), nil } @@ -225,3 +281,15 @@ func (vars Vars) ReplaceString(str string) string { return str } + +func containsWildcards(name string) bool { + for i := 0; 
i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/src/rocker/template/vars_test.go b/src/rocker/template/vars_test.go index 6408cf37..4fdcca07 100644 --- a/src/rocker/template/vars_test.go +++ b/src/rocker/template/vars_test.go @@ -22,12 +22,25 @@ import ( "io/ioutil" "os" "path" + "rocker/imagename" "rocker/test" "testing" "github.com/stretchr/testify/assert" ) +func TestVars_MergeSlices(t *testing.T) { + v1 := Vars{ + "fruits": []string{"banana", "apple"}, + } + v2 := Vars{ + "fruits": []string{"pear", "orange"}, + } + v3 := v1.Merge(v2) + + assert.Equal(t, []string{"banana", "apple", "pear", "orange"}, v3["fruits"].([]string)) +} + func TestVarsToStrings(t *testing.T) { t.Parallel() @@ -100,6 +113,8 @@ func TestVarsFromStrings(t *testing.T) { } } +// TODO: test VarsFromFileMulti + func TestVarsFromFile_Yaml(t *testing.T) { tempDir, rm := tplMkFiles(t, map[string]string{ "vars.yml": ` @@ -118,6 +133,28 @@ Bar: yes assert.Equal(t, true, vars["Bar"]) } +func TestVarsFromFile_Yaml_Artifacts(t *testing.T) { + tempDir, rm := tplMkFiles(t, map[string]string{ + "vars.yml": ` +Foo: x +Bar: yes +RockerArtifacts: +- Name: golang:1.5 + Tag: "1.5" +`, + }) + defer rm() + + vars, err := VarsFromFile(tempDir + "/vars.yml") + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, "x", vars["Foo"]) + assert.Equal(t, true, vars["Bar"]) + assert.IsType(t, []imagename.Artifact{}, vars["RockerArtifacts"]) +} + func TestVarsFromFile_Json(t *testing.T) { tempDir, rm := tplMkFiles(t, map[string]string{ "vars.json": ` From 51b6dce83b3f7b39f2a3843b00c57a5c614c0950 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 17:52:54 +0300 Subject: [PATCH 111/131] #46 Ability to lookup images by fuzzy semver tags --- src/rocker/build/build_test.go | 7 ++- src/rocker/build/client.go | 82 +++++++++++++++++++++++++++++-- src/rocker/build/commands.go | 17 ++----- src/rocker/build/commands_test.go | 36 ++------------ 4 files changed, 92 insertions(+), 50 deletions(-) diff --git a/src/rocker/build/build_test.go b/src/rocker/build/build_test.go index a3d2d125..86a24247 100644 --- a/src/rocker/build/build_test.go +++ b/src/rocker/build/build_test.go @@ -47,7 +47,7 @@ func TestBuild_ReplaceEnvVars(t *testing.T) { resultImage := &docker.Image{ID: "789"} - c.On("InspectImage", "ubuntu").Return(img, nil).Once() + c.On("LookupImage", "ubuntu", false).Return(img, nil).Once() c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) @@ -98,6 +98,11 @@ func (m *MockClient) PullImage(name string) error { return args.Error(0) } +func (m *MockClient) LookupImage(name string, pull bool) (*docker.Image, error) { + args := m.Called(name, pull) + return args.Get(0).(*docker.Image), args.Error(1) +} + func (m *MockClient) RemoveImage(imageID string) error { args := m.Called(imageID) return args.Error(0) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index e66fffd2..7883677d 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -41,6 +41,7 @@ import ( type Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error + LookupImage(name string, pull bool) (*docker.Image, error) RemoveImage(imageID string) error TagImage(imageID, imageName string) error PushImage(imageName string) (digest string, err error) @@ -74,10 +75,9 @@ func NewDockerClient(dockerClient 
*docker.Client, auth docker.AuthConfiguration) // InspectImage inspects docker image // it does not give an error when image not found, but returns nil instead -func (c *DockerClient) InspectImage(name string) (*docker.Image, error) { - img, err := c.client.InspectImage(name) +func (c *DockerClient) InspectImage(name string) (img *docker.Image, err error) { // We simply return nil in case image not found - if err == docker.ErrNoSuchImage { + if img, err = c.client.InspectImage(name); err == docker.ErrNoSuchImage { return nil, nil } return img, err @@ -121,6 +121,82 @@ func (c *DockerClient) PullImage(name string) error { return <-errch } +// LookupImage looks up for the image by name and returns *docker.Image object (result of the inspect) +// `pull` param defines whether we want to update the latest version of the image from the remote registry +// +// If `pull` is false, it tries to lookup locally by exact matching, e.g. if the image is already +// pulled with that exact name given (no fuzzy semver matching) +// +// Then the function fetches the list of all pulled images and tries to match one of them by the given name. +// +// If `pull` is set to true or if it cannot find the image locally, it then fetches all image +// tags from the remote registry and finds the best match for the given image name. +// +// If it cannot find the image either locally or in the remote registry, it returns `nil` +func (c *DockerClient) LookupImage(name string, pull bool) (img *docker.Image, err error) { + var ( + imgName = imagename.NewFromString(name) + localImages = []*imagename.ImageName{} + candidate *imagename.ImageName + remoteCandidate *imagename.ImageName + dockerImages []docker.APIImages + remoteImages []*imagename.ImageName + ) + + // If pull is true, then there is no sense to inspect the local image + if !pull { + // Try to inspect image as is, without version resolution + if img, err := c.InspectImage(name); err != nil || img != nil { + return img, err + } + } + + // List local images + if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { + return nil, err + } + for _, image := range dockerImages { + for _, repoTag := range image.RepoTags { + localImages = append(localImages, imagename.NewFromString(repoTag)) + } + } + + // Resolve local candidate + candidate = imgName.ResolveVersion(localImages) + + // In case we want to include external images as well, pulling list of available + // images from repository or central docker hub + if pull || candidate == nil { + log.Debugf("Getting list of tags for %s from the registry", imgName) + + if remoteImages, err = imagename.RegistryListTags(imgName); err != nil { + err = fmt.Errorf("Failed to list tags of image %s from the remote registry, error: %s", imgName, err) + } + + // Since we found the remove image, we want to pull it + if remoteCandidate = imgName.ResolveVersion(remoteImages); remoteCandidate != nil { + if err = c.PullImage(remoteCandidate.String()); err != nil { + return + } + candidate = remoteCandidate + } + } + + // If not candidate found, it's an error + if candidate == nil { + err = fmt.Errorf("Image not found: %s (also checked in the remote registry)", imgName) + return + } + + if remoteCandidate != nil { + log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag()) + } else { + log.Infof("Resolve %s --> %s", imgName, candidate.GetTag()) + } + + return c.InspectImage(candidate.String()) +} + // RemoveImage removes docker image func (c *DockerClient) RemoveImage(imageID string) error { 
log.Infof("| Remove image %.12s", imageID) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 5ec1554c..79d1bc78 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -157,23 +157,12 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { return s, nil } - // If Pull is true, then img will remain nil and it will be pulled below - if !b.cfg.Pull { - if img, err = b.client.InspectImage(name); err != nil { - return s, err - } + if img, err = b.client.LookupImage(name, b.cfg.Pull); err != nil { + return s, fmt.Errorf("FROM: Failed to lookup image: %s, error: %s", name, err) } if img == nil { - if err = b.client.PullImage(name); err != nil { - return s, err - } - if img, err = b.client.InspectImage(name); err != nil { - return s, err - } - if img == nil { - return s, fmt.Errorf("FROM: Failed to inspect image after pull: %s", name) - } + return s, fmt.Errorf("FROM: image %s not found", name) } // We want to say the size of the FROM image. Better to do it diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index 0ae7543a..e23c622f 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -43,7 +43,7 @@ func TestCommandFrom_Existing(t *testing.T) { }, } - c.On("InspectImage", "existing").Return(img, nil).Once() + c.On("LookupImage", "existing", false).Return(img, nil).Once() state, err := cmd.Execute(b) if err != nil { @@ -68,8 +68,7 @@ func TestCommandFrom_PullExisting(t *testing.T) { }, } - c.On("PullImage", "existing").Return(nil).Once() - c.On("InspectImage", "existing").Return(img, nil).Once() + c.On("LookupImage", "existing", true).Return(img, nil).Once() state, err := cmd.Execute(b) if err != nil { @@ -89,38 +88,11 @@ func TestCommandFrom_NotExisting(t *testing.T) { var nilImg *docker.Image - img := &docker.Image{ - ID: "123", - Config: &docker.Config{}, - } - - c.On("InspectImage", "not-existing").Return(nilImg, nil).Once() - c.On("PullImage", "not-existing").Return(nil).Once() - c.On("InspectImage", "not-existing").Return(img, nil).Once() - - state, err := cmd.Execute(b) - if err != nil { - t.Fatal(err) - } - - c.AssertExpectations(t) - assert.Equal(t, "123", state.ImageID) -} - -func TestCommandFrom_AfterPullNotExisting(t *testing.T) { - b, c := makeBuild(t, "", Config{}) - cmd := &CommandFrom{ConfigCommand{ - args: []string{"not-existing"}, - }} - - var nilImg *docker.Image - - c.On("InspectImage", "not-existing").Return(nilImg, nil).Twice() - c.On("PullImage", "not-existing").Return(nil).Once() + c.On("LookupImage", "not-existing", false).Return(nilImg, nil).Once() _, err := cmd.Execute(b) c.AssertExpectations(t) - assert.Equal(t, "FROM: Failed to inspect image after pull: not-existing", err.Error()) + assert.Equal(t, "FROM: image not-existing not found", err.Error()) } // =========== Testing RUN =========== From a927214db443f8a37eecc2d21295d71f09999e53 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 20:03:39 +0300 Subject: [PATCH 112/131] #46 fix: local matching was looking on tags only --- src/rocker/build/client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 7883677d..8514f536 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -157,7 +157,9 @@ func (c *DockerClient) LookupImage(name string, pull bool) (img *docker.Image, e } for _, image := range dockerImages { for _, repoTag := range image.RepoTags { - localImages = 
append(localImages, imagename.NewFromString(repoTag)) + if n := imagename.NewFromString(repoTag); imgName.IsSameKind(*n) { + localImages = append(localImages, n) + } } } From a15f97a5135484a2f018e74fcf326cf6b7e2da64 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 20:04:36 +0300 Subject: [PATCH 113/131] #46 fix: imagename by when listing non-library images' tages --- src/rocker/imagename/registry.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/rocker/imagename/registry.go b/src/rocker/imagename/registry.go index a7bb17aa..f7967421 100644 --- a/src/rocker/imagename/registry.go +++ b/src/rocker/imagename/registry.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "net/http" + "strings" "github.com/fsouza/go-dockerclient" ) @@ -95,8 +96,13 @@ func RegistryListTags(image *ImageName) (images []*ImageName, err error) { // registryListTagsDockerHub lists image tags from hub.docker.com func registryListTagsDockerHub(image *ImageName) (images []*ImageName, err error) { + name := image.Name + if !strings.Contains(name, "/") { + name = "library/" + name + } + tg := registryTags{} - if err = registryGet(fmt.Sprintf("https://hub.docker.com/v2/repositories/library/%s/tags/?page_size=9999&page=1", image.Name), &tg); err != nil { + if err = registryGet(fmt.Sprintf("https://hub.docker.com/v2/repositories/%s/tags/?page_size=9999&page=1", name), &tg); err != nil { return } From 31b4c079ce09e9e0eb3411c111180a0c7726b37c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 20 Oct 2015 20:53:30 +0300 Subject: [PATCH 114/131] #46 make it work with @sha256 tags --- src/rocker/build/client.go | 78 ++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 32 deletions(-) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 8514f536..f8bc42b9 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -122,53 +122,61 @@ func (c *DockerClient) PullImage(name string) error { } // LookupImage looks up for the image by name and returns *docker.Image object (result of the inspect) -// `pull` param defines whether we want to update the latest version of the image from the remote registry +// `hub` param defines whether we want to update the latest version of the image from the remote registry // -// If `pull` is false, it tries to lookup locally by exact matching, e.g. if the image is already +// If `hub` is false, it tries to lookup locally by exact matching, e.g. if the image is already // pulled with that exact name given (no fuzzy semver matching) // // Then the function fetches the list of all pulled images and tries to match one of them by the given name. // -// If `pull` is set to true or if it cannot find the image locally, it then fetches all image +// If `hub` is set to true or if it cannot find the image locally, it then fetches all image // tags from the remote registry and finds the best match for the given image name. 
// // If it cannot find the image either locally or in the remote registry, it returns `nil` -func (c *DockerClient) LookupImage(name string, pull bool) (img *docker.Image, err error) { +func (c *DockerClient) LookupImage(name string, hub bool) (img *docker.Image, err error) { var ( - imgName = imagename.NewFromString(name) - localImages = []*imagename.ImageName{} - candidate *imagename.ImageName - remoteCandidate *imagename.ImageName - dockerImages []docker.APIImages - remoteImages []*imagename.ImageName + imgName = imagename.NewFromString(name) + localImages = []*imagename.ImageName{} + remoteImages []*imagename.ImageName + candidate, remoteCandidate *imagename.ImageName + dockerImages []docker.APIImages + pull = false + isSha = imgName.TagIsSha() ) - // If pull is true, then there is no sense to inspect the local image - if !pull { + // If hub is true, then there is no sense to inspect the local image + if !hub || isSha { // Try to inspect image as is, without version resolution if img, err := c.InspectImage(name); err != nil || img != nil { return img, err } } - // List local images - if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { - return nil, err - } - for _, image := range dockerImages { - for _, repoTag := range image.RepoTags { - if n := imagename.NewFromString(repoTag); imgName.IsSameKind(*n) { - localImages = append(localImages, n) + if isSha { + // If we are still here and image not found locally, we want to pull it + candidate = imgName + hub = false + pull = true + } else { + // List local images + if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { + return nil, err + } + for _, image := range dockerImages { + for _, repoTag := range image.RepoTags { + if n := imagename.NewFromString(repoTag); imgName.IsSameKind(*n) { + localImages = append(localImages, n) + } } } - } - // Resolve local candidate - candidate = imgName.ResolveVersion(localImages) + // Resolve local candidate + candidate = imgName.ResolveVersion(localImages) + } // In case we want to include external images as well, pulling list of available - // images from repository or central docker hub - if pull || candidate == nil { + // images from the remote registry + if hub || candidate == nil { log.Debugf("Getting list of tags for %s from the registry", imgName) if remoteImages, err = imagename.RegistryListTags(imgName); err != nil { @@ -177,9 +185,7 @@ func (c *DockerClient) LookupImage(name string, pull bool) (img *docker.Image, e // Since we found the remove image, we want to pull it if remoteCandidate = imgName.ResolveVersion(remoteImages); remoteCandidate != nil { - if err = c.PullImage(remoteCandidate.String()); err != nil { - return - } + pull = true candidate = remoteCandidate } } @@ -190,10 +196,18 @@ func (c *DockerClient) LookupImage(name string, pull bool) (img *docker.Image, e return } - if remoteCandidate != nil { - log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag()) - } else { - log.Infof("Resolve %s --> %s", imgName, candidate.GetTag()) + if !isSha { + if remoteCandidate != nil { + log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag()) + } else { + log.Infof("Resolve %s --> %s", imgName, candidate.GetTag()) + } + } + + if pull { + if err = c.PullImage(candidate.String()); err != nil { + return + } } return c.InspectImage(candidate.String()) From 83adea9dd4e08b1383396f2acd26082bdf51cd12 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 17:48:08 +0300 Subject: [PATCH 
115/131] clean comment --- src/rocker/template/vars.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/rocker/template/vars.go b/src/rocker/template/vars.go index 4f449c90..09bf9c9a 100644 --- a/src/rocker/template/vars.go +++ b/src/rocker/template/vars.go @@ -190,7 +190,6 @@ func VarsFromFileMulti(files []string) (Vars, error) { ) for _, pat := range files { - // TODO: error if file not found (when not using wildcards) matches = []string{pat} if containsWildcards(pat) { From a86d7310cd246ddf2a4e3dc4d6cae71d2b2f2d02 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 17:48:28 +0300 Subject: [PATCH 116/131] #46 refactoring and unit tests --- src/rocker/build/build.go | 91 ++++++++++++++ src/rocker/build/build_test.go | 192 +++++++++++++++++++++++++++++- src/rocker/build/client.go | 98 +++------------ src/rocker/build/commands.go | 4 +- src/rocker/build/commands_test.go | 35 ++---- src/rocker/imagename/imagename.go | 5 + 6 files changed, 308 insertions(+), 117 deletions(-) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index cb0d5ca9..855c2ec3 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -19,6 +19,7 @@ package build import ( "fmt" "io" + "rocker/imagename" "github.com/docker/docker/pkg/units" "github.com/fatih/color" @@ -249,3 +250,93 @@ func (b *Build) getExportsContainer() (name string, err error) { return containerID, nil } + +// lookupImage looks up for the image by name and returns *docker.Image object (result of the inspect) +// `hub` param defines whether we want to update the latest version of the image from the remote registry +// +// TODO: update me! +// +// If `hub` is false, it tries to lookup locally by exact matching, e.g. if the image is already +// pulled with that exact name given (no fuzzy semver matching) +// +// Then the function fetches the list of all pulled images and tries to match one of them by the given name. +// +// If `hub` is set to true or if it cannot find the image locally, it then fetches all image +// tags from the remote registry and finds the best match for the given image name. 
+// +// If it cannot find the image either locally or in the remote registry, it returns `nil` +func (b *Build) lookupImage(name string) (img *docker.Image, err error) { + var ( + candidate, remoteCandidate *imagename.ImageName + + imgName = imagename.NewFromString(name) + pull = false + hub = b.cfg.Pull + isSha = imgName.TagIsSha() + ) + + // If hub is true, then there is no sense to inspect the local image + if !hub || isSha { + // Try to inspect image as is, without version resolution + if img, err := b.client.InspectImage(name); err != nil || img != nil { + return img, err + } + } + + if isSha { + // If we are still here and image not found locally, we want to pull it + candidate = imgName + hub = false + pull = true + } + + if !isSha && !hub { + // List local images + var localImages = []*imagename.ImageName{} + if localImages, err = b.client.ListImages(); err != nil { + return nil, err + } + // Resolve local candidate + candidate = imgName.ResolveVersion(localImages) + } + + // In case we want to include external images as well, pulling list of available + // images from the remote registry + if hub || candidate == nil { + log.Debugf("Getting list of tags for %s from the registry", imgName) + + var remoteImages []*imagename.ImageName + + if remoteImages, err = b.client.ListImageTags(imgName.String()); err != nil { + err = fmt.Errorf("Failed to list tags of image %s from the remote registry, error: %s", imgName, err) + } + + // Since we found the remote image, we want to pull it + if remoteCandidate = imgName.ResolveVersion(remoteImages); remoteCandidate != nil { + pull = true + candidate = remoteCandidate + } + } + + // If not candidate found, it's an error + if candidate == nil { + err = fmt.Errorf("Image not found: %s (also checked in the remote registry)", imgName) + return + } + + if !isSha && imgName.GetTag() != candidate.GetTag() { + if remoteCandidate != nil { + log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag()) + } else { + log.Infof("Resolve %s --> %s", imgName, candidate.GetTag()) + } + } + + if pull { + if err = b.client.PullImage(candidate.String()); err != nil { + return + } + } + + return b.client.InspectImage(candidate.String()) +} diff --git a/src/rocker/build/build_test.go b/src/rocker/build/build_test.go index 86a24247..f68b972a 100644 --- a/src/rocker/build/build_test.go +++ b/src/rocker/build/build_test.go @@ -18,6 +18,7 @@ package build import ( "io" + "rocker/imagename" "rocker/template" "runtime" "strings" @@ -47,7 +48,7 @@ func TestBuild_ReplaceEnvVars(t *testing.T) { resultImage := &docker.Image{ID: "789"} - c.On("LookupImage", "ubuntu", false).Return(img, nil).Once() + c.On("InspectImage", "ubuntu").Return(img, nil).Once() c.On("CreateContainer", mock.AnythingOfType("State")).Return("456", nil).Run(func(args mock.Arguments) { arg := args.Get(0).(State) @@ -63,6 +64,184 @@ func TestBuild_ReplaceEnvVars(t *testing.T) { } } +func TestBuild_LookupImage_ExactExistLocally(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + ) + + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_ExistLocally(t *testing.T) { + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + localImages = []*imagename.ImageName{ 
+ imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("ListImages").Return(localImages, nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_NotExistLocally(t *testing.T) { + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + localImages = []*imagename.ImageName{} + + remoteImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("ListImages").Return(localImages, nil).Once() + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_PullAndExist(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{Pull: true}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu:latest" + + remoteImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + imagename.NewFromString("ubuntu:latest"), + } + ) + + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_PullAndNotExist(t *testing.T) { + var ( + b, c = makeBuild(t, "", Config{Pull: true}) + name = "ubuntu:latest" + + remoteImages = []*imagename.ImageName{ + imagename.NewFromString("debian:7.7"), + imagename.NewFromString("debian:latest"), + imagename.NewFromString("ubuntu:12.04"), + imagename.NewFromString("ubuntu:14.04"), + } + ) + + c.On("ListImageTags", name).Return(remoteImages, nil).Once() + + _, err := b.lookupImage(name) + assert.EqualError(t, err, "Image not found: ubuntu:latest (also checked in the remote registry)") + c.AssertExpectations(t) +} + +func TestBuild_LookupImage_ShaExistLocally(t *testing.T) { + for _, pull := range []bool{true, false} { + t.Logf("Testing with pull=%t", pull) + + var ( + b, c = makeBuild(t, "", Config{Pull: pull}) + resultImage = &docker.Image{ID: "789"} + name = "ubuntu@sha256:afafa" + ) + + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) + } +} + +func TestBuild_LookupImage_ShaNotExistLocally(t *testing.T) { + for _, pull := range []bool{true, false} { + t.Logf("Testing with pull=%t", pull) + + var ( + nilImage *docker.Image + + b, c = makeBuild(t, "", Config{Pull: pull}) 
+ resultImage = &docker.Image{ID: "789"} + name = "ubuntu@sha256:afafa" + ) + + c.On("InspectImage", name).Return(nilImage, nil).Once() + c.On("PullImage", name).Return(nil).Once() + c.On("InspectImage", name).Return(resultImage, nil).Once() + + result, err := b.lookupImage(name) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, resultImage, result) + c.AssertExpectations(t) + } +} + // internal helpers func makeBuild(t *testing.T, rockerfileContent string, cfg Config) (*Build, *MockClient) { @@ -98,9 +277,14 @@ func (m *MockClient) PullImage(name string) error { return args.Error(0) } -func (m *MockClient) LookupImage(name string, pull bool) (*docker.Image, error) { - args := m.Called(name, pull) - return args.Get(0).(*docker.Image), args.Error(1) +func (m *MockClient) ListImages() (images []*imagename.ImageName, err error) { + args := m.Called() + return args.Get(0).([]*imagename.ImageName), args.Error(1) +} + +func (m *MockClient) ListImageTags(name string) (images []*imagename.ImageName, err error) { + args := m.Called(name) + return args.Get(0).([]*imagename.ImageName), args.Error(1) } func (m *MockClient) RemoveImage(imageID string) error { diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index f8bc42b9..2a70e535 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -41,7 +41,8 @@ import ( type Client interface { InspectImage(name string) (*docker.Image, error) PullImage(name string) error - LookupImage(name string, pull bool) (*docker.Image, error) + ListImages() (images []*imagename.ImageName, err error) + ListImageTags(name string) (images []*imagename.ImageName, err error) RemoveImage(imageID string) error TagImage(imageID, imageName string) error PushImage(imageName string) (digest string, err error) @@ -121,96 +122,27 @@ func (c *DockerClient) PullImage(name string) error { return <-errch } -// LookupImage looks up for the image by name and returns *docker.Image object (result of the inspect) -// `hub` param defines whether we want to update the latest version of the image from the remote registry -// -// If `hub` is false, it tries to lookup locally by exact matching, e.g. if the image is already -// pulled with that exact name given (no fuzzy semver matching) -// -// Then the function fetches the list of all pulled images and tries to match one of them by the given name. -// -// If `hub` is set to true or if it cannot find the image locally, it then fetches all image -// tags from the remote registry and finds the best match for the given image name. 
-// -// If it cannot find the image either locally or in the remote registry, it returns `nil` -func (c *DockerClient) LookupImage(name string, hub bool) (img *docker.Image, err error) { - var ( - imgName = imagename.NewFromString(name) - localImages = []*imagename.ImageName{} - remoteImages []*imagename.ImageName - candidate, remoteCandidate *imagename.ImageName - dockerImages []docker.APIImages - pull = false - isSha = imgName.TagIsSha() - ) - - // If hub is true, then there is no sense to inspect the local image - if !hub || isSha { - // Try to inspect image as is, without version resolution - if img, err := c.InspectImage(name); err != nil || img != nil { - return img, err - } - } - - if isSha { - // If we are still here and image not found locally, we want to pull it - candidate = imgName - hub = false - pull = true - } else { - // List local images - if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { - return nil, err - } - for _, image := range dockerImages { - for _, repoTag := range image.RepoTags { - if n := imagename.NewFromString(repoTag); imgName.IsSameKind(*n) { - localImages = append(localImages, n) - } - } - } - - // Resolve local candidate - candidate = imgName.ResolveVersion(localImages) - } - - // In case we want to include external images as well, pulling list of available - // images from the remote registry - if hub || candidate == nil { - log.Debugf("Getting list of tags for %s from the registry", imgName) - - if remoteImages, err = imagename.RegistryListTags(imgName); err != nil { - err = fmt.Errorf("Failed to list tags of image %s from the remote registry, error: %s", imgName, err) - } - - // Since we found the remove image, we want to pull it - if remoteCandidate = imgName.ResolveVersion(remoteImages); remoteCandidate != nil { - pull = true - candidate = remoteCandidate - } - } +// ListImages lists all pulled images in the local docker registry +func (c *DockerClient) ListImages() (images []*imagename.ImageName, err error) { - // If not candidate found, it's an error - if candidate == nil { - err = fmt.Errorf("Image not found: %s (also checked in the remote registry)", imgName) + var dockerImages []docker.APIImages + if dockerImages, err = c.client.ListImages(docker.ListImagesOptions{}); err != nil { return } - if !isSha { - if remoteCandidate != nil { - log.Infof("Resolve %s --> %s (found remotely)", imgName, candidate.GetTag()) - } else { - log.Infof("Resolve %s --> %s", imgName, candidate.GetTag()) + images = []*imagename.ImageName{} + for _, image := range dockerImages { + for _, repoTag := range image.RepoTags { + images = append(images, imagename.NewFromString(repoTag)) } } - if pull { - if err = c.PullImage(candidate.String()); err != nil { - return - } - } + return +} - return c.InspectImage(candidate.String()) +// ListImageTags returns the list of images instances obtained from all tags existing in the registry +func (c *DockerClient) ListImageTags(name string) (images []*imagename.ImageName, err error) { + return imagename.RegistryListTags(imagename.NewFromString(name)) } // RemoveImage removes docker image diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 79d1bc78..7d2dad25 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -157,8 +157,8 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { return s, nil } - if img, err = b.client.LookupImage(name, b.cfg.Pull); err != nil { - return s, fmt.Errorf("FROM: Failed to lookup image: %s, error: %s", name, 
err) + if img, err = b.lookupImage(name); err != nil { + return s, fmt.Errorf("FROM error: %s", err) } if img == nil { diff --git a/src/rocker/build/commands_test.go b/src/rocker/build/commands_test.go index e23c622f..3a72f845 100644 --- a/src/rocker/build/commands_test.go +++ b/src/rocker/build/commands_test.go @@ -19,6 +19,7 @@ package build import ( "fmt" "reflect" + "rocker/imagename" "testing" "github.com/kr/pretty" @@ -43,32 +44,7 @@ func TestCommandFrom_Existing(t *testing.T) { }, } - c.On("LookupImage", "existing", false).Return(img, nil).Once() - - state, err := cmd.Execute(b) - if err != nil { - t.Fatal(err) - } - - c.AssertExpectations(t) - assert.Equal(t, "123", state.ImageID) - assert.Equal(t, "localhost", state.Config.Hostname) -} - -func TestCommandFrom_PullExisting(t *testing.T) { - b, c := makeBuild(t, "", Config{Pull: true}) - cmd := &CommandFrom{ConfigCommand{ - args: []string{"existing"}, - }} - - img := &docker.Image{ - ID: "123", - Config: &docker.Config{ - Hostname: "localhost", - }, - } - - c.On("LookupImage", "existing", true).Return(img, nil).Once() + c.On("InspectImage", "existing").Return(img, nil).Once() state, err := cmd.Execute(b) if err != nil { @@ -87,12 +63,15 @@ func TestCommandFrom_NotExisting(t *testing.T) { }} var nilImg *docker.Image + var nilList []*imagename.ImageName - c.On("LookupImage", "not-existing", false).Return(nilImg, nil).Once() + c.On("InspectImage", "not-existing").Return(nilImg, nil).Once() + c.On("ListImages").Return(nilList, nil).Once() + c.On("ListImageTags", "not-existing:latest").Return(nilList, nil).Once() _, err := cmd.Execute(b) c.AssertExpectations(t) - assert.Equal(t, "FROM: image not-existing not found", err.Error()) + assert.Equal(t, "FROM error: Image not found: not-existing:latest (also checked in the remote registry)", err.Error()) } // =========== Testing RUN =========== diff --git a/src/rocker/imagename/imagename.go b/src/rocker/imagename/imagename.go index 0e9d9cd6..e021d863 100644 --- a/src/rocker/imagename/imagename.go +++ b/src/rocker/imagename/imagename.go @@ -225,6 +225,11 @@ func (img ImageName) Contains(b *ImageName) bool { // ResolveVersion finds an applicable tag for current image among the list of available tags func (img *ImageName) ResolveVersion(list []*ImageName) (result *ImageName) { for _, candidate := range list { + // If these are different images (different names/repos) + if !img.IsSameKind(*candidate) { + continue + } + // If we have a strict equality if img.HasTag() && candidate.HasTag() && img.Tag == candidate.Tag { return candidate From 04f749d53b25a6231e935b03a9c4627f02ce7efe Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 18:00:04 +0300 Subject: [PATCH 117/131] #45 display info about applying artifacts --- src/rocker/template/template.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/rocker/template/template.go b/src/rocker/template/template.go index 9478d81d..70db1d61 100644 --- a/src/rocker/template/template.go +++ b/src/rocker/template/template.go @@ -287,13 +287,13 @@ func makeImageHelper(vars Vars) func(string, ...string) (string, error) { } if a.Digest != "" { - log.Debugf("Apply digest %s for image %s", a.Digest, image) + log.Infof("Apply artifact digest %s for image %s", a.Digest, image) image.SetTag(a.Digest) matched = true break } if a.Name.HasTag() { - log.Debugf("Apply tag %s for image %s", a.Name.GetTag(), image) + log.Infof("Apply artifact tag %s for image %s", a.Name.GetTag(), image) image.SetTag(a.Name.GetTag()) matched = true 
break From 3861751ba5b7217e2e4b9e0e342ddc42609fd366 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 18:00:24 +0300 Subject: [PATCH 118/131] `-print` be silent if not in debug mode --- src/cmd/rocker/main.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 8fb871d9..0b45f27d 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -193,6 +193,12 @@ func buildCommand(c *cli.Context) { initLogs(c) + // We don't want info level for 'print' mode + // So log only errors unless 'debug' is on + if c.Bool("print") && log.StandardLogger().Level != log.DebugLevel { + log.StandardLogger().Level = log.ErrorLevel + } + vars, err := template.VarsFromFileMulti(c.StringSlice("vars")) if err != nil { log.Fatal(err) From f002787730c4465f61eb8c2bd2ec08702cd6fe7f Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 18:01:02 +0300 Subject: [PATCH 119/131] cleanup commented stuff from main --- src/cmd/rocker/main.go | 40 ---------------------------------------- 1 file changed, 40 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 0b45f27d..32d0e5cf 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -216,22 +216,6 @@ func buildCommand(c *cli.Context) { vars["DemandArtifacts"] = true } - // obtain git info about current directory - // gitInfo, err := git.Info(filepath.Dir(configFilename)) - // if err != nil { - // // Ignore if given directory is not a git repo - // if _, ok := err.(*git.ErrNotGitRepo); !ok { - // log.Fatal(err) - // } - // } - - // // some additional useful vars - // vars["commit"] = stringOr(os.Getenv("GIT_COMMIT"), gitInfo.Sha) - // vars["branch"] = stringOr(os.Getenv("GIT_BRANCH"), gitInfo.Branch) - // vars["git_url"] = stringOr(os.Getenv("GIT_URL"), gitInfo.URL) - // vars["commit_message"] = gitInfo.Message - // vars["commit_author"] = gitInfo.Author - wd, err := os.Getwd() if err != nil { log.Fatal(err) @@ -346,30 +330,6 @@ func buildCommand(c *cli.Context) { ) log.Infof("Successfully built %.12s | %s", builder.GetImageID(), size) - - // builder := build.Builder{ - // Rockerfile: configFilename, - // ContextDir: contextDir, - // UtilizeCache: !c.Bool("no-cache"), - // Push: c.Bool("push"), - // NoReuse: c.Bool("no-reuse"), - // Verbose: c.Bool("verbose"), - // Attach: c.Bool("attach"), - // Print: c.Bool("print"), - // Auth: auth, - // Vars: vars, - // CliVars: cliVars, - // InStream: os.Stdin, - // OutStream: os.Stdout, - // Docker: dockerClient, - // AddMeta: c.Bool("meta"), - // Pull: c.Bool("pull"), - // ID: c.String("id"), - // } - - // if _, err := builder.Build(); err != nil { - // log.Fatal(err) - // } } func cleanCommand(c *cli.Context) { From 3b5905678b83b1b5cc4f22557bed523ff4d2438e Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 21 Oct 2015 18:06:26 +0300 Subject: [PATCH 120/131] #46 update doc --- src/rocker/build/build.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/rocker/build/build.go b/src/rocker/build/build.go index 855c2ec3..e38c67e8 100644 --- a/src/rocker/build/build.go +++ b/src/rocker/build/build.go @@ -252,19 +252,23 @@ func (b *Build) getExportsContainer() (name string, err error) { } // lookupImage looks up for the image by name and returns *docker.Image object (result of the inspect) -// `hub` param defines whether we want to update the latest version of the image from the remote registry +// `Pull` config option defines whether we want to update the latest version 
of the image from the remote registry +// See build.Config struct for more details about other build config options. // -// TODO: update me! -// -// If `hub` is false, it tries to lookup locally by exact matching, e.g. if the image is already +// If `Pull` is false, it tries to lookup locally by exact matching, e.g. if the image is already // pulled with that exact name given (no fuzzy semver matching) // // Then the function fetches the list of all pulled images and tries to match one of them by the given name. // -// If `hub` is set to true or if it cannot find the image locally, it then fetches all image +// If `Pull` is set to true or if it cannot find the image locally, it then fetches all image // tags from the remote registry and finds the best match for the given image name. // // If it cannot find the image either locally or in the remote registry, it returns `nil` +// +// In case the given image has sha256 tag, it looks for it locally and pulls if it's not found. +// No semver matching is done for sha256 tagged images. +// +// See also TestBuild_LookupImage_* test cases in build_test.go func (b *Build) lookupImage(name string) (img *docker.Image, err error) { var ( candidate, remoteCandidate *imagename.ImageName From b49d412c510dc20076b944f8b0fa425ad73dab43 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 3 Nov 2015 12:47:56 +0200 Subject: [PATCH 121/131] add TODO about pushImage error --- src/rocker/build/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/rocker/build/client.go b/src/rocker/build/client.go index 2a70e535..b1d619df 100644 --- a/src/rocker/build/client.go +++ b/src/rocker/build/client.go @@ -424,6 +424,7 @@ func (c *DockerClient) PushImage(imageName string) (digest string, err error) { log.Debugf("Push with options: %# v", opts) + // TODO: DisplayJSONMessagesStream may fail by client.PushImage run without errors go func() { if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fdOut, isTerminalOut); err != nil { log.Errorf("Failed to process json stream, error %s", err) From 8632d6d097135c71dfff9b4f0b1bbe00eebf39b7 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 3 Nov 2015 12:57:43 +0200 Subject: [PATCH 122/131] dockerclient: Fix Windows inability to handle a tilde as the home directory --- src/rocker/dockerclient/dockerclient.go | 6 + vendor/manifest | 6 + .../github.com/mitchellh/go-homedir/LICENSE | 21 +++ .../github.com/mitchellh/go-homedir/README.md | 14 ++ .../mitchellh/go-homedir/homedir.go | 132 ++++++++++++++++++ .../mitchellh/go-homedir/homedir_test.go | 112 +++++++++++++++ 6 files changed, 291 insertions(+) create mode 100644 vendor/src/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/src/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/src/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/src/github.com/mitchellh/go-homedir/homedir_test.go diff --git a/src/rocker/dockerclient/dockerclient.go b/src/rocker/dockerclient/dockerclient.go index 7077c897..d9994b02 100644 --- a/src/rocker/dockerclient/dockerclient.go +++ b/src/rocker/dockerclient/dockerclient.go @@ -29,6 +29,7 @@ import ( "github.com/codegangsta/cli" "github.com/fsouza/go-dockerclient" + "github.com/mitchellh/go-homedir" ) var ( @@ -50,6 +51,11 @@ func NewConfig() *Config { certPath := os.Getenv("DOCKER_CERT_PATH") if certPath == "" { certPath = "~/.docker" + homePath, err := homedir.Dir() + if err != nil { + log.Fatal(err) + } + certPath = homePath + "/.docker" } host := os.Getenv("DOCKER_HOST") if 
host == "" { diff --git a/vendor/manifest b/vendor/manifest index 62cf6977..ee633cc0 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -143,6 +143,12 @@ "revision": "02ae137b1d309729c32110aac6e315e798ba4f0e", "branch": "master", "path": "/pkg/fileutils" + }, + { + "importpath": "github.com/mitchellh/go-homedir", + "repository": "https://github.com/mitchellh/go-homedir", + "revision": "d682a8f0cf139663a984ff12528da460ca963de9", + "branch": "master" } ] } \ No newline at end of file diff --git a/vendor/src/github.com/mitchellh/go-homedir/LICENSE b/vendor/src/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/src/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/src/github.com/mitchellh/go-homedir/README.md b/vendor/src/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 00000000..d70706d5 --- /dev/null +++ b/vendor/src/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/src/github.com/mitchellh/go-homedir/homedir.go b/vendor/src/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 00000000..6944957d --- /dev/null +++ b/vendor/src/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,132 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. 
+// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If "getent" is missing, ignore it + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/src/github.com/mitchellh/go-homedir/homedir_test.go b/vendor/src/github.com/mitchellh/go-homedir/homedir_test.go new file mode 100644 index 00000000..c34dbc7f --- /dev/null +++ b/vendor/src/github.com/mitchellh/go-homedir/homedir_test.go @@ -0,0 +1,112 @@ +package homedir + +import ( + "fmt" + "os" + "os/user" + "testing" +) + +func patchEnv(key, value string) func() { + bck := os.Getenv(key) + deferFunc := func() { + os.Setenv(key, bck) + } + + os.Setenv(key, value) + return deferFunc +} + +func BenchmarkDir(b *testing.B) { + // We do this for any "warmups" + for i := 0; i < 10; i++ { + Dir() + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Dir() + } +} + +func TestDir(t *testing.T) { + u, err := user.Current() + if err != nil { + t.Fatalf("err: %s", err) + } + + dir, err := Dir() + if err != nil { + t.Fatalf("err: %s", err) + } + + if u.HomeDir != dir { + t.Fatalf("%#v != %#v", u.HomeDir, dir) + } +} + +func TestExpand(t *testing.T) { + u, err := user.Current() + if err != nil { + t.Fatalf("err: %s", err) + } + + cases := []struct { + Input string + Output string + Err bool + }{ + 
{ + "/foo", + "/foo", + false, + }, + + { + "~/foo", + fmt.Sprintf("%s/foo", u.HomeDir), + false, + }, + + { + "", + "", + false, + }, + + { + "~", + u.HomeDir, + false, + }, + + { + "~foo/foo", + "", + true, + }, + } + + for _, tc := range cases { + actual, err := Expand(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err) + } + + if actual != tc.Output { + t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual) + } + } + + DisableCache = true + defer func() { DisableCache = false }() + defer patchEnv("HOME", "/custom/path/")() + expected := "/custom/path/foo/bar" + actual, err := Expand("~/foo/bar") + + if err != nil { + t.Errorf("No error is expected, got: %v", err) + } else if actual != "/custom/path/foo/bar" { + t.Errorf("Expected: %v; actual: %v", expected, actual) + } +} From bab7101b04b1cbfe4ccc00d39c73e690af5710a9 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 3 Nov 2015 17:53:28 +0200 Subject: [PATCH 123/131] FROM: fix running from images with null config --- src/rocker/build/commands.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index 7d2dad25..f18217ff 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -175,7 +175,11 @@ func (c *CommandFrom) Execute(b *Build) (s State, err error) { s = b.state s.ImageID = img.ID - s.Config = *img.Config + s.Config = docker.Config{} + + if img.Config != nil { + s.Config = *img.Config + } b.ProducedSize = 0 b.VirtualSize = img.VirtualSize From 4724058e04e7c24a63dc636e430f404a11f20cad Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Tue, 10 Nov 2015 16:07:57 +0200 Subject: [PATCH 124/131] make [-cmd, -C] flag that prints current command; remove clean Useful to debug arguments in teamcity --- src/cmd/rocker/main.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 32d0e5cf..10397bd0 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -80,6 +80,9 @@ func main() { cli.BoolTFlag{ Name: "colors", }, + cli.BoolFlag{ + Name: "cmd, C", + }, }, dockerclient.GlobalCliParams()...) 
buildFlags := []cli.Flag{ @@ -164,11 +167,7 @@ func main() { Usage: "launches a build for the specified Rockerfile", Action: buildCommand, Flags: buildFlags, - }, - { - Name: "clean", - Usage: "complete a task on the list", - Action: cleanCommand, + Before: globalBefore, }, dockerclient.InfoCommandSpec(), } @@ -184,6 +183,13 @@ func main() { } } +func globalBefore(c *cli.Context) error { + if c.GlobalBool("cmd") { + log.Infof("Cmd: %s", strings.Join(os.Args, " ")) + } + return nil +} + func buildCommand(c *cli.Context) { var ( @@ -332,12 +338,6 @@ func buildCommand(c *cli.Context) { log.Infof("Successfully built %.12s | %s", builder.GetImageID(), size) } -func cleanCommand(c *cli.Context) { - verbose := c.Bool("verbose") - fmt.Println("verbose") - fmt.Println(verbose) -} - func initLogs(ctx *cli.Context) { logger := log.StandardLogger() From 8166012851fca03bfcb64ee482dc9b3e74c21314 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 16 Nov 2015 16:25:51 +0200 Subject: [PATCH 125/131] fix #48 --- src/rocker/build/copy.go | 38 ++++++++++++++++------------------- src/rocker/build/copy_test.go | 33 ++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 21 deletions(-) diff --git a/src/rocker/build/copy.go b/src/rocker/build/copy.go index 5897cd6f..5c523d4f 100644 --- a/src/rocker/build/copy.go +++ b/src/rocker/build/copy.go @@ -330,14 +330,23 @@ func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, erro seen[relFilePath] = struct{}{} // cut the wildcard path of the file or use base name - var resultFilePath string - if containsWildcards(pattern) { - common := commonPrefix(pattern, relFilePath) - resultFilePath = strings.Replace(relFilePath, common, "", 1) + + var ( + resultFilePath string + baseChunks = splitPath(pattern) + destChunks = splitPath(relFilePath) + lastChunk = baseChunks[len(baseChunks)-1] + ) + + if containsWildcards(lastChunk) { + // In case there is `foo/bar/*` source path we need to make a + // destination files without `foo/bar/` prefix + resultFilePath = filepath.Join(destChunks[len(baseChunks)-1:]...) 
} else if matchInfo.IsDir() { - common := commonPrefix(pattern, match) - resultFilePath = strings.Replace(relFilePath, common, "", 1) + // If source is a directory, keep as is + resultFilePath = relFilePath } else { + // The source has referred to a file resultFilePath = filepath.Base(relFilePath) } @@ -372,21 +381,8 @@ func containsWildcards(name string) bool { return false } -func commonPrefix(a, b string) (prefix string) { - // max length of either a or b - l := len(a) - if len(b) > l { - l = len(b) - } - // find common prefix - for i := 0; i < l; i++ { - if a[i] != b[i] { - break - } - // not optimal, but I don't care - prefix = prefix + string(a[i]) - } - return +func splitPath(path string) []string { + return strings.Split(path, string(os.PathSeparator)) } type nestedPattern struct { diff --git a/src/rocker/build/copy_test.go b/src/rocker/build/copy_test.go index 1158a8dd..4a44132e 100644 --- a/src/rocker/build/copy_test.go +++ b/src/rocker/build/copy_test.go @@ -755,6 +755,39 @@ func TestCopy_MakeTarStream_SubDirRenameWildcard(t *testing.T) { assert.Equal(t, assertion, out, "bad tar content") } +func TestCopy_MakeTarStream_WierdWildcards(t *testing.T) { + tmpDir := makeTmpDir(t, map[string]string{ + "abc.txt": "hello", + "adf.txt": "hello", + "bvz.txt": "hello", + }) + defer os.RemoveAll(tmpDir) + + includes := []string{ + "a*.txt", + } + excludes := []string{} + dest := "./" + + t.Logf("includes: %# v", pretty.Formatter(includes)) + t.Logf("excludes: %# v", pretty.Formatter(excludes)) + t.Logf("dest: %# v", pretty.Formatter(dest)) + + stream, err := makeTarStream(tmpDir, dest, "COPY", includes, excludes) + if err != nil { + t.Fatal(err) + } + + out := writeReadTar(t, tmpDir, stream.tar) + + assertion := strings.Join([]string{ + "./abc.txt", + "./adf.txt", + }, "\n") + "\n" + + assert.Equal(t, assertion, out, "bad tar content") +} + func TestCopy_MakeTarStream_SingleFileDirRename(t *testing.T) { tmpDir := makeTmpDir(t, map[string]string{ "c/foo.txt": "hello", From d6a4bbfc503169324de3462a2ae2bb5a5dc2f041 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 16 Nov 2015 19:35:44 +0200 Subject: [PATCH 126/131] rocker/debugtrap: show goroutine stack on SIGUSR1 --- src/cmd/rocker/main.go | 2 ++ src/rocker/debugtrap/debugtrap_unix.go | 23 +++++++++++++++++++ src/rocker/debugtrap/debugtrap_unsupported.go | 9 ++++++++ 3 files changed, 34 insertions(+) create mode 100644 src/rocker/debugtrap/debugtrap_unix.go create mode 100644 src/rocker/debugtrap/debugtrap_unsupported.go diff --git a/src/cmd/rocker/main.go b/src/cmd/rocker/main.go index 10397bd0..0d042553 100644 --- a/src/cmd/rocker/main.go +++ b/src/cmd/rocker/main.go @@ -23,6 +23,7 @@ import ( "strings" "rocker/build" + "rocker/debugtrap" "rocker/dockerclient" "rocker/template" "rocker/textformatter" @@ -53,6 +54,7 @@ var ( func init() { log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) + debugtrap.SetupDumpStackTrap() } func main() { diff --git a/src/rocker/debugtrap/debugtrap_unix.go b/src/rocker/debugtrap/debugtrap_unix.go new file mode 100644 index 00000000..097ae854 --- /dev/null +++ b/src/rocker/debugtrap/debugtrap_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package debugtrap + +import ( + "os" + "os/signal" + "syscall" + + psignal "github.com/docker/docker/pkg/signal" +) + +// SetupDumpStackTrap set up a handler for SIGUSR1 and dumps +// the goroutine stack trace to INFO log +func SetupDumpStackTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + 
psignal.DumpStacks() + } + }() +} diff --git a/src/rocker/debugtrap/debugtrap_unsupported.go b/src/rocker/debugtrap/debugtrap_unsupported.go new file mode 100644 index 00000000..c640640a --- /dev/null +++ b/src/rocker/debugtrap/debugtrap_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux,!darwin,!freebsd + +package debugtrap + +// SetupDumpStackTrap set up a handler for SIGUSR1 and dumps +// the goroutine stack trace to INFO log +func SetupDumpStackTrap() { + return +} From ae470f0f3cd2ccf7fb9d9f95c7cb93029897a57c Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Wed, 18 Nov 2015 22:41:59 +0200 Subject: [PATCH 127/131] fix build-cross --- Rockerfile.build-cross | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Rockerfile.build-cross b/Rockerfile.build-cross index 8d4783d7..7f8c61f6 100644 --- a/Rockerfile.build-cross +++ b/Rockerfile.build-cross @@ -1,4 +1,4 @@ -FROM dockerhub.grammarly.io/golang-1.4.2-cross:v2 +FROM dockerhub.grammarly.io/golang-1.5.1-cross ADD . /src WORKDIR /src From 501e0856806df59ad7d50165a48e123ef66c9b0a Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Thu, 19 Nov 2015 01:17:02 +0200 Subject: [PATCH 128/131] fix: attach MOUNTed volumes for EXPORT action --- src/rocker/build/commands.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/rocker/build/commands.go b/src/rocker/build/commands.go index f18217ff..f6917a7d 100644 --- a/src/rocker/build/commands.go +++ b/src/rocker/build/commands.go @@ -1224,7 +1224,9 @@ func (c *CommandExport) Execute(b *Build) (s State, err error) { }() // Append exports container as a volume - s.NoCache.HostConfig.VolumesFrom = []string{exportsContainerID} + // TODO: test the case when there are imports before + s.NoCache.HostConfig.VolumesFrom = append( + s.NoCache.HostConfig.VolumesFrom, exportsContainerID) cmd := []string{"/opt/rsync/bin/rsync", "-a", "--delete-during"} @@ -1336,7 +1338,11 @@ func (c *CommandImport) Execute(b *Build) (s State, err error) { s.Config.Cmd = cmd s.Config.Entrypoint = []string{} - s.NoCache.HostConfig.VolumesFrom = []string{b.exportsContainerName()} + + // Append exports container as a volume + // TODO: test the case when there are imports before + s.NoCache.HostConfig.VolumesFrom = append( + s.NoCache.HostConfig.VolumesFrom, b.exportsContainerName()) if importID, err = b.client.CreateContainer(s); err != nil { return s, err From 4dc498bac527d154a5ab27656553568af74b0f99 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 23 Nov 2015 09:16:59 +0200 Subject: [PATCH 129/131] ignore docker client integration test --- src/rocker/dockerclient/dockerclient_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/rocker/dockerclient/dockerclient_test.go b/src/rocker/dockerclient/dockerclient_test.go index d3966f40..ddd5e76d 100644 --- a/src/rocker/dockerclient/dockerclient_test.go +++ b/src/rocker/dockerclient/dockerclient_test.go @@ -1,3 +1,5 @@ +// +build integration + /*- * Copyright 2015 Grammarly, Inc. * From 3557e3e62de55099dce0d82431d76109d010c234 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 23 Nov 2015 12:21:55 +0200 Subject: [PATCH 130/131] readme v1 intro --- README.md | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 0cb73ab1..37090c92 100644 --- a/README.md +++ b/README.md @@ -2,21 +2,18 @@ Rocker breaks the limits of Dockerfile. It adds some crucial features that are missing while keeping Docker’s original design and idea. 
Read the [blog post](http://tech.grammarly.com/blog/posts/Making-Docker-Rock-at-Grammarly.html) about how and why it was invented. -# *NOTE on v1 branch* -In this branch we are developing the new experimental implementation of Rocker that will be completely client-side driven, with no fallback on `docker build`. This means faster builds and more power. No build context uploads anymore. Also, the builder code is completely rewritten and made much more testable and extensible in the future. Caching might be also rethought. Cross-server builds determinism is our dream. +# *v1 NOTE* +Rocker has been rewritten from scratch and now it became much more robust! While [dockramp](https://github.com/jlhawn/dockramp) as a proof of concept of a client-driven Docker builder, Rocker is a full-featured implementation. -Install v1 (you should have golang 1.5): +1. There are no context uploads and fallbacks to `docker build`. It makes your builds faster especially if you have a big project. +2. Cache lookup works much faster than Docker's implementation when you have thousands of layers. +3. Better output: rocker reports size for each produced layer, so you see which steps take space. +4. Works with Docker >= 1.8 -```bash -make -make install -``` - -### v1 TODO +What is not supported yet: -- [x] Cache -- [x] FROM scratch -- [ ] ADD urls and local archives +1. `ADD ` +2. Adding tar archives that supposed to automatically extract --- @@ -56,6 +53,15 @@ Something like this: curl -SL https://github.com/grammarly/rocker/releases/download/0.2.2/rocker-0.2.2_darwin_amd64.tar.gz | tar -xzC /usr/local/bin && chmod +x /usr/local/bin/rocker ``` +### Building locally + +`make` will produce the `bin/rocker` binary. + +```bash +make +make install +``` + ### Getting help, usage: ```bash From 5c382685c9f35a7a488bd66c5c64fdd3a135bac0 Mon Sep 17 00:00:00 2001 From: Yuriy Bogdanov Date: Mon, 23 Nov 2015 12:34:36 +0200 Subject: [PATCH 131/131] v1 readme retouch --- README.md | 102 +++++++----------------------------------------------- 1 file changed, 13 insertions(+), 89 deletions(-) diff --git a/README.md b/README.md index 37090c92..5e63a97a 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,6 @@ What is not supported yet: * [TAG](#tag) * [PUSH](#push) * [Templating](#templating) - * [REQUIRE](#require) - * [INCLUDE](#include) * [ATTACH](#attach) * [Where to go next?](#where-to-go-next) * [Contributing](#contributing) @@ -50,7 +48,7 @@ Go to the [releases](https://github.com/grammarly/rocker/releases) section and d Something like this: ```bash -curl -SL https://github.com/grammarly/rocker/releases/download/0.2.2/rocker-0.2.2_darwin_amd64.tar.gz | tar -xzC /usr/local/bin && chmod +x /usr/local/bin/rocker +curl -SL https://github.com/grammarly/rocker/releases/download/1.0.0/rocker-1.0.0_darwin_amd64.tar.gz | tar -xzC /usr/local/bin && chmod +x /usr/local/bin/rocker ``` ### Building locally @@ -87,7 +85,9 @@ The most challenging part is caching. While implementing those features seems to ### How does it work -Rocker parses the Rockerfile into an AST using the same library Docker uses for parsing Dockerfiles. Then it goes through the instructions and makes a decision, should it execute a command on its own or delegate it to Docker. Internally, Rocker splits a Rockerfile into slices, some of them are executed through Docker’s remote API, some are sent as regular Dockerfiles underneath. This allows to not reimplement the whole thing — only add custom stuff. 
So if you have a plain Dockerfile, Rocker will not find any custom commands, so it will just pass it straight to Docker. +Rocker parses the Rockerfile into an AST using the same library Docker uses for parsing Dockerfiles. Then it builds a [plan](/src/rocker/build/plan.go) out of instructions and yields a list of commands. For every command there is a function in [commands.go](/src/rocker/build/commands.go) though in the future we will make it extensible. + +The more detailed documentation of internals will come later. # MOUNT @@ -102,10 +102,6 @@ or ```bash MOUNT .:/src ``` -or -```bash -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa -``` `MOUNT` is used to share volumes between builds, so they can be reused by tools like dependency management. There are two types of mounts: @@ -114,20 +110,20 @@ MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa Volume container names are hashed with Rockerfile’s full path and the directories it shares. So as long as your Rockerfile has the same name and it is in the same place — same volume containers will be used. -Note that Rocker is not tracking changes in mounted directories, so no changes can affect caching. Cache will be busted only if you change list of mounts, add or remove them. In future, we may add some configuration flags, so you can specify if you want to watch the actual mount contents changes, and make them invalidate the cache (for example $GIT_SSH_KEY contents may change). +Note that Rocker is not tracking changes in mounted directories, so no changes can affect caching. Cache will be busted only if you change list of mounts, add or remove them. In future, we may add some configuration flags, so you can specify if you want to watch the actual mount contents changes, and make them invalidate the cache. -To force cache invalidation you can always use `--no-cache` flag for `rocker build` command. But you will then need a lot of patience. +To force cache invalidation you can always use `--no-cache` or `--reload-cache` flags for `rocker build` command. But you will then need a lot of patience. **Example usage** ```bash FROM grammarly/nodejs:latest -ADD . /src #1 +ADD . /src #1 WORKDIR /src -MOUNT /src/node_modules /src/bower_components #2 -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa #3 -RUN npm install #4 -RUN cp -R /src /app #5 +MOUNT /src/node_modules /src/bower_components #2 +MOUNT {{ .Env.GIT_SSH_KEY }}:/root/.ssh/id_rsa #3 +RUN npm install #4 +RUN cp -R /src /app #5 WORKDIR /app CMD ["/usr/bin/node", "index.js"] ``` @@ -351,66 +347,6 @@ CMD ["/bin/rocker"] PUSH grammarly/rocker:0.1.22 ``` -# REQUIRE - -```bash -REQUIRE foo -``` -or -```bash -REQUIRE ["foo", "bar"] -``` - -Useful when you use variables, for example for image name or tag (as shown above). In such case, you should specify the variable because otherwise the build doesn't make sense. - -`REQUIRE` does not affect the cache and it doesn't produce any layers. - -**Usage** -```bash -FROM google/golang:1.4 -… -CMD ["/bin/rocker"] -REQUIRE Version -PUSH grammarly/rocker:{{ .Version }} -``` - -So if we run the build not specifying the version variable (like `-var "Version=123"`), it will fail -```bash -$ rocker build -... -Error: Var $Version is required but not set -``` - -# INCLUDE - -```bash -INCLUDE path/to/mixin -``` -or -```bash -INCLUDE ../../path/to/mixin -``` - -Adds ability to include other Dockerfiles or Rockerfiles into your file. Useful if you have some collections of mixins on the side, such as a recipe to install nodejs or python, and want to use them. - -1. 
Paths passed to `INCLUDE` are relative to the Rockerfile's directory. -2. It is not allowed to nest includes, e.g. use `INCLUDE` in files which are being included. - -**Usage** -```bash -# includes/install_nodejs -RUN apt-get install nodejs -``` - -```bash -# Rockerfile -FROM debian:jessie -INCLUDE includes/install_nodejs -ADD . /src -WORKDIR /src -CMD ["node", "app.js"] -``` - # ATTACH ```bash ATTACH @@ -430,7 +366,7 @@ FROM phusion/passenger-ruby22 WORKDIR /src MOUNT /var/lib/gems -MOUNT $GIT_SSH_KEY:/root/.ssh/id_rsa +MOUNT {{ .Env.GIT_SSH_KEY }}:/root/.ssh/id_rsa MOUNT .:/src RUN ["bundle", "install"] @@ -469,7 +405,7 @@ gb build or build for all platforms: ```bash -make +make all ``` If you have a github access token, you can also do a github release: @@ -501,18 +437,6 @@ gb test rocker/... -run TestMyFunction # TODO -- [x] Correctly handle streaming TTY from Docker, so we can show fancy progress bars -- [x] rocker build --attach? possibly allow to attach to a running container within build, so can run interactively; may be useful for dev images -- [ ] run own tar stream so there is no need to put a generated dockerfile into a working directory -- [ ] write reamde about rocker cli -- [ ] colorful output for terminals -- [ ] Should the same mounts be reused between different FROMs? -- [ ] rocker inspect; inspecting a Rockerfile - whilch mount/export containers are there -- [ ] SQUASH as discussed [here](https://github.com/docker/docker/issues/332) -- [ ] do not store properties in an image -- [ ] Read Rockerfile from stdin -- [ ] Make more TODOs here - ```bash grep -R TODO **/*.go | grep -v '^vendor/' ```
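
As a companion to the `fix #48` change to copy.go earlier in this series, below is a minimal standalone Go sketch of the destination-naming rule that change introduces. It is illustrative only: it is not part of any patch above, the input paths are made up, and the wildcard check is simplified relative to the real `containsWildcards` helper in copy.go.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// splitPath mirrors the helper added in copy.go: split a path into chunks
// by the OS path separator.
func splitPath(path string) []string {
	return strings.Split(path, string(os.PathSeparator))
}

// destPath sketches the destination-naming rule from the fix:
//   - wildcard in the last pattern chunk -> drop the pattern's directory prefix
//   - pattern refers to a directory      -> keep the relative path as is
//   - pattern refers to a single file    -> use the base name only
//
// isDir stands in for the matchInfo.IsDir() check in the real code; the
// wildcard test here is a simplified ContainsAny instead of containsWildcards.
func destPath(pattern, relFilePath string, isDir bool) string {
	baseChunks := splitPath(pattern)
	destChunks := splitPath(relFilePath)
	lastChunk := baseChunks[len(baseChunks)-1]

	switch {
	case strings.ContainsAny(lastChunk, "*?["):
		// e.g. `foo/bar/*` should produce names without the `foo/bar/` prefix
		return filepath.Join(destChunks[len(baseChunks)-1:]...)
	case isDir:
		// a directory source keeps its relative path
		return relFilePath
	default:
		// a single file source keeps only its base name
		return filepath.Base(relFilePath)
	}
}

func main() {
	// Hypothetical Unix-style inputs:
	fmt.Println(destPath("a*.txt", "abc.txt", false))            // abc.txt
	fmt.Println(destPath("foo/bar/*", "foo/bar/baz.txt", false)) // baz.txt
	fmt.Println(destPath("c", "c/foo.txt", true))                // c/foo.txt
}
```

On a Unix path separator the three calls print `abc.txt`, `baz.txt`, and `c/foo.txt`, which corresponds to the names the new `TestCopy_MakeTarStream_WierdWildcards` and related copy tests expect to see under a `./` destination once the tar stream is assembled.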